
Bug#1035059: marked as done (bullseye-pu: package dpdk/20.11.8-1~deb11u1)



Your message dated Sat, 07 Oct 2023 12:41:28 +0100
with message-id <84bb5ff8312f749ebe536897993782bf35aa1977.camel@adam-barratt.org.uk>
and subject line Closing opu requests for updates included in 11.8
has caused the Debian Bug report #1035059,
regarding bullseye-pu: package dpdk/20.11.8-1~deb11u1
to be marked as done.

This means that you claim that the problem has been dealt with.
If this is not the case it is now your responsibility to reopen the
Bug report if necessary, and/or fix the problem forthwith.

(NB: If you are a system administrator and have no idea what this
message is talking about, this may indicate a serious mail system
misconfiguration somewhere. Please contact owner@bugs.debian.org
immediately.)


-- 
1035059: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1035059
Debian Bug Tracking System
Contact owner@bugs.debian.org with problems
--- Begin Message ---
Package: release.debian.org
Severity: normal
Tags: bullseye
User: release.debian.org@packages.debian.org
Usertags: pu
X-Debbugs-CC: pkg-dpdk-devel@lists.alioth.debian.org

Dear release team,

We would like to upload a new LTS release of DPDK to bullseye. As we
have already done this previously for both buster and bullseye, I have
gone ahead and uploaded to bullseye-pu.

As before, the LTS point release contains only bug fixes, with no API
breakage, and has been validated with regression tests.

The source debdiff is attached.

-- 
Kind regards,
Luca Boccassi
diff -Nru dpdk-20.11.7/app/test/packet_burst_generator.c dpdk-20.11.8/app/test/packet_burst_generator.c
--- dpdk-20.11.7/app/test/packet_burst_generator.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/app/test/packet_burst_generator.c	2023-04-27 18:57:22.000000000 +0100
@@ -262,11 +262,11 @@
 		void *ip_hdr, uint8_t ipv4, struct rte_udp_hdr *udp_hdr,
 		int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs)
 {
-	int i, nb_pkt = 0;
-	size_t eth_hdr_size;
-
+	const uint8_t pkt_seg_data_len = pkt_len / nb_pkt_segs;
 	struct rte_mbuf *pkt_seg;
 	struct rte_mbuf *pkt;
+	size_t eth_hdr_size;
+	int i, nb_pkt = 0;
 
 	for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
 		pkt = rte_pktmbuf_alloc(mp);
@@ -277,7 +277,7 @@
 			break;
 		}
 
-		pkt->data_len = pkt_len;
+		pkt->data_len = pkt_seg_data_len;
 		pkt_seg = pkt;
 		for (i = 1; i < nb_pkt_segs; i++) {
 			pkt_seg->next = rte_pktmbuf_alloc(mp);
@@ -287,7 +287,10 @@
 				goto nomore_mbuf;
 			}
 			pkt_seg = pkt_seg->next;
-			pkt_seg->data_len = pkt_len;
+			if (i != nb_pkt_segs - 1)
+				pkt_seg->data_len = pkt_seg_data_len;
+			else
+				pkt_seg->data_len = pkt_seg_data_len + pkt_len % nb_pkt_segs;
 		}
 		pkt_seg->next = NULL; /* Last segment of packet. */
 
@@ -343,11 +346,11 @@
 		uint8_t ipv4, uint8_t proto, void *proto_hdr,
 		int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs)
 {
-	int i, nb_pkt = 0;
-	size_t eth_hdr_size;
-
+	const uint8_t pkt_seg_data_len = pkt_len / nb_pkt_segs;
 	struct rte_mbuf *pkt_seg;
 	struct rte_mbuf *pkt;
+	size_t eth_hdr_size;
+	int i, nb_pkt = 0;
 
 	for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
 		pkt = rte_pktmbuf_alloc(mp);
@@ -358,7 +361,7 @@
 			break;
 		}
 
-		pkt->data_len = pkt_len;
+		pkt->data_len = pkt_seg_data_len;
 		pkt_seg = pkt;
 		for (i = 1; i < nb_pkt_segs; i++) {
 			pkt_seg->next = rte_pktmbuf_alloc(mp);
@@ -368,7 +371,10 @@
 				goto nomore_mbuf;
 			}
 			pkt_seg = pkt_seg->next;
-			pkt_seg->data_len = pkt_len;
+			if (i != nb_pkt_segs - 1)
+				pkt_seg->data_len = pkt_seg_data_len;
+			else
+				pkt_seg->data_len = pkt_seg_data_len + pkt_len % nb_pkt_segs;
 		}
 		pkt_seg->next = NULL; /* Last segment of packet. */
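
The two hunks above fix the burst generator so that pkt_len is spread
across the nb_pkt_segs segments instead of each segment carrying the
full pkt_len: every segment gets pkt_len / nb_pkt_segs bytes, and the
last one also absorbs the division remainder. An illustrative
standalone sketch of that arithmetic (plain C, not part of the debdiff;
the names here are made up):

    #include <stdint.h>
    #include <stdio.h>

    /* Mirror of the patched sizing: each segment carries
     * pkt_len / nb_segs bytes; the last segment additionally
     * absorbs the remainder pkt_len % nb_segs. */
    static void split_pkt_len(uint8_t pkt_len, uint8_t nb_segs)
    {
            const uint8_t seg_len = pkt_len / nb_segs;
            uint8_t i;

            for (i = 0; i < nb_segs; i++) {
                    uint8_t len = seg_len;

                    if (i == nb_segs - 1)
                            len += pkt_len % nb_segs;
                    printf("seg %u: %u bytes\n", (unsigned)i, (unsigned)len);
            }
    }

    int main(void)
    {
            split_pkt_len(100, 3); /* 33 + 33 + 34 = 100 bytes total */
            return 0;
    }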
 
diff -Nru dpdk-20.11.7/app/test/test_cryptodev.c dpdk-20.11.8/app/test/test_cryptodev.c
--- dpdk-20.11.7/app/test/test_cryptodev.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/app/test/test_cryptodev.c	2023-04-27 18:57:22.000000000 +0100
@@ -10172,11 +10172,11 @@
 	TEST_ASSERT((stats.enqueued_count == 1),
 		"rte_cryptodev_stats_get returned unexpected enqueued stat");
 	TEST_ASSERT((stats.dequeued_count == 1),
-		"rte_cryptodev_stats_get returned unexpected enqueued stat");
+		"rte_cryptodev_stats_get returned unexpected dequeued stat");
 	TEST_ASSERT((stats.enqueue_err_count == 0),
-		"rte_cryptodev_stats_get returned unexpected enqueued stat");
+		"rte_cryptodev_stats_get returned unexpected enqueued error count stat");
 	TEST_ASSERT((stats.dequeue_err_count == 0),
-		"rte_cryptodev_stats_get returned unexpected enqueued stat");
+		"rte_cryptodev_stats_get returned unexpected dequeued error count stat");
 
 	/* invalid device but should ignore and not reset device stats*/
 	rte_cryptodev_stats_reset(ts_params->valid_devs[0] + 300);
@@ -10184,7 +10184,7 @@
 			&stats),
 		"rte_cryptodev_stats_get failed");
 	TEST_ASSERT((stats.enqueued_count == 1),
-		"rte_cryptodev_stats_get returned unexpected enqueued stat");
+		"rte_cryptodev_stats_get returned unexpected enqueued stat after invalid reset");
 
 	/* check that a valid reset clears stats */
 	rte_cryptodev_stats_reset(ts_params->valid_devs[0]);
@@ -10192,9 +10192,9 @@
 			&stats),
 					  "rte_cryptodev_stats_get failed");
 	TEST_ASSERT((stats.enqueued_count == 0),
-		"rte_cryptodev_stats_get returned unexpected enqueued stat");
+		"rte_cryptodev_stats_get returned unexpected enqueued stat after valid reset");
 	TEST_ASSERT((stats.dequeued_count == 0),
-		"rte_cryptodev_stats_get returned unexpected enqueued stat");
+		"rte_cryptodev_stats_get returned unexpected dequeued stat after valid reset");
 
 	return TEST_SUCCESS;
 }
diff -Nru dpdk-20.11.7/app/test/test_cryptodev_security_pdcp_test_vectors.h dpdk-20.11.8/app/test/test_cryptodev_security_pdcp_test_vectors.h
--- dpdk-20.11.7/app/test/test_cryptodev_security_pdcp_test_vectors.h	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/app/test/test_cryptodev_security_pdcp_test_vectors.h	2023-04-27 18:57:22.000000000 +0100
@@ -5441,7 +5441,7 @@
 		    0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70,
 		    0x33, 0x8A, 0x15, 0xD0, 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C,
 		    0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46,
-		    0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD},
+		    0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0x00, 0x00, 0x00, 0x00},
 	/* Control Plane w/NULL enc. + NULL int. DL LONG SN */
 	(uint8_t[]){0xA0, 0x00, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82,
 		    0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71,
@@ -5449,7 +5449,7 @@
 		    0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70,
 		    0x33, 0x8A, 0x15, 0xD0, 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C,
 		    0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46,
-		    0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD},
+		    0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0x00, 0x00, 0x00, 0x00},
 	/* Control Plane w/NULL enc. + SNOW f9 int. UL LONG SN */
 	(uint8_t[]){0x00, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82,
 		    0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71,
@@ -5835,7 +5835,7 @@
 		    0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70,
 		    0x33, 0x8A, 0x15, 0xD0, 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C,
 		    0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46,
-		    0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD},
+		    0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0x00, 0x00, 0x00, 0x00},
 	/* User Plane w/NULL enc. + NULL int. DL for 12-bit SN */
 	(uint8_t[]){0xA0, 0x00, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82,
 		    0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71,
@@ -5843,7 +5843,7 @@
 		    0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70,
 		    0x33, 0x8A, 0x15, 0xD0, 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C,
 		    0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46,
-		    0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD},
+		    0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0x00, 0x00, 0x00, 0x00},
 	/* User Plane w/NULL enc. + SNOW f9 int. UL for 12-bit SN */
 	(uint8_t[]){0x80, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82,
 		    0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71,
diff -Nru dpdk-20.11.7/app/test/test_mbuf.c dpdk-20.11.8/app/test/test_mbuf.c
--- dpdk-20.11.7/app/test/test_mbuf.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/app/test/test_mbuf.c	2023-04-27 18:57:22.000000000 +0100
@@ -2744,6 +2744,7 @@
 
 	/* split m0 chain in two, between m1 and m2 */
 	m0->nb_segs = 2;
+	m0->pkt_len -= m2->data_len;
 	m1->next = NULL;
 	m2->nb_segs = 1;
 
@@ -2764,6 +2765,7 @@
 			m2->nb_segs != 1 || m2->next != NULL)
 		GOTO_FAIL("nb_segs or next was not reset properly");
 
+	rte_mempool_free(pool);
 	return 0;
 
 fail:
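
The test split m0's chain by hand, which left m0->pkt_len counting
bytes that now belong to the detached m2; the hunk shrinks pkt_len
accordingly (and frees the pool on the success path). The invariant
being restored, as a simplified sketch (not the real rte_mbuf, just
the fields involved):

    #include <stddef.h>
    #include <stdint.h>

    /* Simplified stand-in for rte_mbuf with just the chain fields. */
    struct mbuf {
            struct mbuf *next;
            uint32_t pkt_len;   /* whole-chain total, head segment only */
            uint16_t data_len;  /* bytes in this segment */
            uint16_t nb_segs;   /* segment count, head segment only */
    };

    /* Invariant: the head's pkt_len equals the sum of data_len over
     * the chain, and nb_segs matches the actual segment count.
     * Detaching a segment must therefore also subtract its data_len
     * from the head's pkt_len. */
    static int chain_is_consistent(const struct mbuf *m)
    {
            uint32_t total = 0;
            uint16_t segs = 0;
            const struct mbuf *s;

            for (s = m; s != NULL; s = s->next) {
                    total += s->data_len;
                    segs++;
            }
            return total == m->pkt_len && segs == m->nb_segs;
    }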
diff -Nru dpdk-20.11.7/app/test/test_reorder.c dpdk-20.11.8/app/test/test_reorder.c
--- dpdk-20.11.7/app/test/test_reorder.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/app/test/test_reorder.c	2023-04-27 18:57:22.000000000 +0100
@@ -270,6 +270,7 @@
 	}
 	if (robufs[0] != NULL)
 		rte_pktmbuf_free(robufs[0]);
+	memset(robufs, 0, sizeof(robufs));
 
 	/* Insert more packets
 	 * RB[] = {NULL, NULL, NULL, NULL}
@@ -306,6 +307,7 @@
 		if (robufs[i] != NULL)
 			rte_pktmbuf_free(robufs[i]);
 	}
+	memset(robufs, 0, sizeof(robufs));
 
 	/*
 	 * RB[] = {NULL, NULL, NULL, NULL}
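
Both memset() additions above clear robufs[] once its entries have
been freed, so a later drain or error path cannot free the same mbufs
a second time. The general free-and-clear pattern, sketched standalone
(illustrative, not part of the debdiff):

    #include <stdlib.h>
    #include <string.h>

    /* Release every element, then wipe the slots so stale pointers
     * become NULL rather than dangling; a later cleanup pass that
     * frees non-NULL entries is then a harmless no-op. */
    static void free_and_clear(void **bufs, size_t n)
    {
            size_t i;

            for (i = 0; i < n; i++)
                    free(bufs[i]); /* free(NULL) is defined to do nothing */
            memset(bufs, 0, n * sizeof(bufs[0]));
    }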
diff -Nru dpdk-20.11.7/app/test-bbdev/test_bbdev_perf.c dpdk-20.11.8/app/test-bbdev/test_bbdev_perf.c
--- dpdk-20.11.7/app/test-bbdev/test_bbdev_perf.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/app/test-bbdev/test_bbdev_perf.c	2023-04-27 18:57:22.000000000 +0100
@@ -70,13 +70,12 @@
 
 #define SYNC_WAIT 0
 #define SYNC_START 1
-#define INVALID_OPAQUE -1
 
 #define INVALID_QUEUE_ID -1
 /* Increment for next code block in external HARQ memory */
 #define HARQ_INCR 32768
 /* Headroom for filler LLRs insertion in HARQ buffer */
-#define FILLER_HEADROOM 1024
+#define FILLER_HEADROOM 2048
 /* Constants from K0 computation from 3GPP 38.212 Table 5.4.2.1-2 */
 #define N_ZC_1 66 /* N = 66 Zc for BG 1 */
 #define N_ZC_2 50 /* N = 50 Zc for BG 2 */
@@ -87,6 +86,7 @@
 #define K0_3_1 56 /* K0 fraction numerator for rv 3 and BG 1 */
 #define K0_3_2 43 /* K0 fraction numerator for rv 3 and BG 2 */
 
+#define HARQ_MEM_TOLERANCE 256
 static struct test_bbdev_vector test_vector;
 
 /* Switch between PMD and Interrupt for throughput TC */
@@ -1779,10 +1779,9 @@
 			"op_status (%d) != expected_status (%d)",
 			op->status, expected_status);
 
-	if (op->opaque_data != (void *)(uintptr_t)INVALID_OPAQUE)
-		TEST_ASSERT((void *)(uintptr_t)order_idx == op->opaque_data,
-				"Ordering error, expected %p, got %p",
-				(void *)(uintptr_t)order_idx, op->opaque_data);
+	TEST_ASSERT((void *)(uintptr_t)order_idx == op->opaque_data,
+			"Ordering error, expected %p, got %p",
+			(void *)(uintptr_t)order_idx, op->opaque_data);
 
 	return TEST_SUCCESS;
 }
@@ -1904,13 +1903,17 @@
 		uint16_t data_len = rte_pktmbuf_data_len(m) - offset;
 		total_data_size += orig_op->segments[i].length;
 
-		TEST_ASSERT(orig_op->segments[i].length <
-				(uint32_t)(data_len + 64),
+		TEST_ASSERT(orig_op->segments[i].length < (uint32_t)(data_len + HARQ_MEM_TOLERANCE),
 				"Length of segment differ in original (%u) and filled (%u) op",
 				orig_op->segments[i].length, data_len);
 		harq_orig = (int8_t *) orig_op->segments[i].addr;
 		harq_out = rte_pktmbuf_mtod_offset(m, int8_t *, offset);
 
+		/* Cannot compare HARQ output data for such cases */
+		if ((ldpc_llr_decimals > 1) && ((ops_ld->op_flags & RTE_BBDEV_LDPC_LLR_COMPRESSION)
+				|| (ops_ld->op_flags & RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION)))
+			break;
+
 		if (!(ldpc_cap_flags &
 				RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_FILLERS
 				) || (ops_ld->op_flags &
@@ -1925,9 +1928,9 @@
 					ops_ld->n_filler;
 			if (data_len > deRmOutSize)
 				data_len = deRmOutSize;
-			if (data_len > orig_op->segments[i].length)
-				data_len = orig_op->segments[i].length;
 		}
+		if (data_len > orig_op->segments[i].length)
+			data_len = orig_op->segments[i].length;
 		/*
 		 * HARQ output can have minor differences
 		 * due to integer representation and related scaling
@@ -1986,7 +1989,7 @@
 
 	/* Validate total mbuf pkt length */
 	uint32_t pkt_len = rte_pktmbuf_pkt_len(op->data) - op->offset;
-	TEST_ASSERT(total_data_size < pkt_len + 64,
+	TEST_ASSERT(total_data_size < pkt_len + HARQ_MEM_TOLERANCE,
 			"Length of data differ in original (%u) and filled (%u) op",
 			total_data_size, pkt_len);
 
@@ -4361,7 +4364,8 @@
 		if (unlikely(num_to_process - dequeued < burst_sz))
 			burst_sz = num_to_process - dequeued;
 
-		rte_bbdev_dec_op_alloc_bulk(mempool, ops_enq, burst_sz);
+		ret = rte_bbdev_dec_op_alloc_bulk(mempool, ops_enq, burst_sz);
+		TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", burst_sz);
 		if (test_vector.op_type != RTE_BBDEV_OP_NONE)
 			copy_reference_dec_op(ops_enq, burst_sz, dequeued,
 					bufs->inputs,
@@ -4446,7 +4450,8 @@
 		if (unlikely(num_to_process - dequeued < burst_sz))
 			burst_sz = num_to_process - dequeued;
 
-		rte_bbdev_dec_op_alloc_bulk(mempool, ops_enq, burst_sz);
+		ret = rte_bbdev_dec_op_alloc_bulk(mempool, ops_enq, burst_sz);
+		TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", burst_sz);
 		if (test_vector.op_type != RTE_BBDEV_OP_NONE)
 			copy_reference_ldpc_dec_op(ops_enq, burst_sz, dequeued,
 					bufs->inputs,
@@ -4698,7 +4703,7 @@
 	printf("Set RTE_BBDEV_OFFLOAD_COST to 'y' to turn the test on.\n");
 	return TEST_SKIPPED;
 #else
-	int iter;
+	int iter, ret;
 	uint16_t burst_sz = op_params->burst_sz;
 	const uint16_t num_to_process = op_params->num_to_process;
 	const enum rte_bbdev_op_type op_type = test_vector.op_type;
@@ -4789,7 +4794,10 @@
 			rte_get_tsc_hz());
 
 	struct rte_bbdev_stats stats = {0};
-	get_bbdev_queue_stats(ad->dev_id, queue_id, &stats);
+	ret = get_bbdev_queue_stats(ad->dev_id, queue_id, &stats);
+	TEST_ASSERT_SUCCESS(ret,
+			"Failed to get stats for queue (%u) of device (%u)",
+			queue_id, ad->dev_id);
 	if (op_type != RTE_BBDEV_OP_LDPC_DEC) {
 		TEST_ASSERT_SUCCESS(stats.enqueued_count != num_to_process,
 				"Mismatch in enqueue count %10"PRIu64" %d",
diff -Nru dpdk-20.11.7/app/test-compress-perf/comp_perf_options.h dpdk-20.11.8/app/test-compress-perf/comp_perf_options.h
--- dpdk-20.11.7/app/test-compress-perf/comp_perf_options.h	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/app/test-compress-perf/comp_perf_options.h	2023-04-27 18:57:22.000000000 +0100
@@ -30,9 +30,9 @@
 };
 
 enum comp_operation {
-	COMPRESS_ONLY,
-	DECOMPRESS_ONLY,
-	COMPRESS_DECOMPRESS
+	COMPRESS = (1 << 0),
+	DECOMPRESS = (1 << 1),
+	COMPRESS_DECOMPRESS = (COMPRESS | DECOMPRESS),
 };
 
 struct range_list {
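
Turning the operation enum into a bit mask is what enables the
single-operation fixes later in this debdiff: code can gate each phase
with one bitwise AND, and COMPRESS_DECOMPRESS is simply both bits set.
A minimal sketch of how the later hunks use it (illustrative only):

    enum comp_operation {
            COMPRESS = (1 << 0),
            DECOMPRESS = (1 << 1),
            COMPRESS_DECOMPRESS = (COMPRESS | DECOMPRESS),
    };

    static void run_test(enum comp_operation test_op)
    {
            if (test_op & COMPRESS) {
                    /* run the compression loop */
            }
            if (test_op & DECOMPRESS) {
                    /* run the decompression loop */
            }
            /* test_op == COMPRESS_DECOMPRESS enters both branches */
    }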
diff -Nru dpdk-20.11.7/app/test-compress-perf/comp_perf_options_parse.c dpdk-20.11.8/app/test-compress-perf/comp_perf_options_parse.c
--- dpdk-20.11.7/app/test-compress-perf/comp_perf_options_parse.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/app/test-compress-perf/comp_perf_options_parse.c	2023-04-27 18:57:22.000000000 +0100
@@ -446,11 +446,11 @@
 	struct name_id_map optype_namemap[] = {
 		{
 			"comp",
-			COMPRESS_ONLY
+			COMPRESS
 		},
 		{
 			"decomp",
-			DECOMPRESS_ONLY
+			DECOMPRESS
 		},
 		{
 			"comp_and_decomp",
@@ -491,7 +491,7 @@
 	int id = get_str_key_id_mapping(huffman_namemap,
 			RTE_DIM(huffman_namemap), arg);
 	if (id < 0) {
-		RTE_LOG(ERR, USER1, "Invalid Huffmane encoding specified\n");
+		RTE_LOG(ERR, USER1, "Invalid Huffman encoding specified\n");
 		return -1;
 	}
 
@@ -507,7 +507,7 @@
 
 	/*
 	 * Try parsing the argument as a range, if it fails,
-	 * arse it as a list
+	 * parse it as a list
 	 */
 	if (parse_range(arg, &test_data->level_lst.min,
 			&test_data->level_lst.max,
diff -Nru dpdk-20.11.7/app/test-compress-perf/comp_perf_test_common.c dpdk-20.11.8/app/test-compress-perf/comp_perf_test_common.c
--- dpdk-20.11.7/app/test-compress-perf/comp_perf_test_common.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/app/test-compress-perf/comp_perf_test_common.c	2023-04-27 18:57:22.000000000 +0100
@@ -227,23 +227,43 @@
 {
 	uint16_t comp_mbuf_size;
 	uint16_t decomp_mbuf_size;
+	size_t comp_data_size;
+	size_t decomp_data_size;
+	size_t output_data_sz;
 
 	test_data->out_seg_sz = find_buf_size(test_data->seg_sz);
 
-	/* Number of segments for input and output
-	 * (compression and decompression)
-	 */
-	test_data->total_segs = DIV_CEIL(test_data->input_data_sz,
-			test_data->seg_sz);
+	if (test_data->test_op & COMPRESS) {
+		/*
+		 * Number of segments for input and output
+		 * (compression and decompression)
+		 */
+		test_data->total_segs = DIV_CEIL(test_data->input_data_sz,
+						 test_data->seg_sz);
+	} else {
+		/*
+		 * When application does decompression only, input data is
+		 * compressed and smaller than the output. The expected size of
+		 * uncompressed data given by the user in segment size argument.
+		 */
+		test_data->total_segs = test_data->max_sgl_segs;
+	}
+
+	output_data_sz = (size_t) test_data->out_seg_sz * test_data->total_segs;
+	output_data_sz =
+		RTE_MAX(output_data_sz, (size_t) MIN_COMPRESSED_BUF_SIZE);
 
 	if (test_data->use_external_mbufs != 0) {
 		if (comp_perf_allocate_external_mbufs(test_data, mem) < 0)
 			return -1;
 		comp_mbuf_size = 0;
 		decomp_mbuf_size = 0;
-	} else {
+	} else if (test_data->test_op & COMPRESS) {
 		comp_mbuf_size = test_data->out_seg_sz + RTE_PKTMBUF_HEADROOM;
 		decomp_mbuf_size = test_data->seg_sz + RTE_PKTMBUF_HEADROOM;
+	} else {
+		comp_mbuf_size = test_data->seg_sz + RTE_PKTMBUF_HEADROOM;
+		decomp_mbuf_size = test_data->out_seg_sz + RTE_PKTMBUF_HEADROOM;
 	}
 
 	char pool_name[32] = "";
@@ -287,26 +307,28 @@
 		return -1;
 	}
 
-	/*
-	 * Compressed data might be a bit larger than input data,
-	 * if data cannot be compressed
-	 */
-	mem->compressed_data = rte_zmalloc_socket(NULL,
-				RTE_MAX(
-				    (size_t) test_data->out_seg_sz *
-							  test_data->total_segs,
-				    (size_t) MIN_COMPRESSED_BUF_SIZE),
-				0,
-				rte_socket_id());
+	if (test_data->test_op & COMPRESS) {
+		/*
+		 * Compressed data might be a bit larger than input data,
+		 * if data cannot be compressed
+		 */
+		comp_data_size = output_data_sz;
+		decomp_data_size = test_data->input_data_sz;
+	} else {
+		comp_data_size = test_data->input_data_sz;
+		decomp_data_size = output_data_sz;
+	}
+
+	mem->compressed_data = rte_zmalloc_socket(NULL, comp_data_size, 0,
+						  rte_socket_id());
 	if (mem->compressed_data == NULL) {
 		RTE_LOG(ERR, USER1, "Memory to hold the data from the input "
 				"file could not be allocated\n");
 		return -1;
 	}
 
-	mem->decompressed_data = rte_zmalloc_socket(NULL,
-				test_data->input_data_sz, 0,
-				rte_socket_id());
+	mem->decompressed_data = rte_zmalloc_socket(NULL, decomp_data_size, 0,
+						    rte_socket_id());
 	if (mem->decompressed_data == NULL) {
 		RTE_LOG(ERR, USER1, "Memory to hold the data from the input "
 				"file could not be allocated\n");
@@ -344,6 +366,7 @@
 prepare_bufs(struct comp_test_data *test_data, struct cperf_mem_resources *mem)
 {
 	uint32_t remaining_data = test_data->input_data_sz;
+	uint32_t remaining_data_decomp = test_data->input_data_sz;
 	uint8_t *input_data_ptr = test_data->input_data;
 	size_t data_sz = 0;
 	uint8_t *data_addr;
@@ -351,6 +374,7 @@
 	uint16_t segs_per_mbuf = 0;
 	uint32_t cmz = 0;
 	uint32_t dmz = 0;
+	bool decompress_only = !!(test_data->test_op == DECOMPRESS);
 
 	for (i = 0; i < mem->total_bufs; i++) {
 		/* Allocate data in input mbuf and copy data from input file */
@@ -361,8 +385,6 @@
 			return -1;
 		}
 
-		data_sz = RTE_MIN(remaining_data, test_data->seg_sz);
-
 		if (test_data->use_external_mbufs != 0) {
 			rte_pktmbuf_attach_extbuf(mem->decomp_bufs[i],
 					mem->decomp_memzones[dmz]->addr,
@@ -372,16 +394,23 @@
 			dmz++;
 		}
 
+		if (!decompress_only)
+			data_sz = RTE_MIN(remaining_data, test_data->seg_sz);
+		else
+			data_sz = test_data->out_seg_sz;
+
 		data_addr = (uint8_t *) rte_pktmbuf_append(
 					mem->decomp_bufs[i], data_sz);
 		if (data_addr == NULL) {
 			RTE_LOG(ERR, USER1, "Could not append data\n");
 			return -1;
 		}
-		rte_memcpy(data_addr, input_data_ptr, data_sz);
 
-		input_data_ptr += data_sz;
-		remaining_data -= data_sz;
+		if (!decompress_only) {
+			rte_memcpy(data_addr, input_data_ptr, data_sz);
+			input_data_ptr += data_sz;
+			remaining_data -= data_sz;
+		}
 
 		/* Already one segment in the mbuf */
 		segs_per_mbuf = 1;
@@ -398,8 +427,6 @@
 				return -1;
 			}
 
-			data_sz = RTE_MIN(remaining_data, test_data->seg_sz);
-
 			if (test_data->use_external_mbufs != 0) {
 				rte_pktmbuf_attach_extbuf(
 					next_seg,
@@ -410,6 +437,12 @@
 				dmz++;
 			}
 
+			if (!decompress_only)
+				data_sz = RTE_MIN(remaining_data,
+						  test_data->seg_sz);
+			else
+				data_sz = test_data->out_seg_sz;
+
 			data_addr = (uint8_t *)rte_pktmbuf_append(next_seg,
 				data_sz);
 
@@ -418,9 +451,11 @@
 				return -1;
 			}
 
-			rte_memcpy(data_addr, input_data_ptr, data_sz);
-			input_data_ptr += data_sz;
-			remaining_data -= data_sz;
+			if (!decompress_only) {
+				rte_memcpy(data_addr, input_data_ptr, data_sz);
+				input_data_ptr += data_sz;
+				remaining_data -= data_sz;
+			}
 
 			if (rte_pktmbuf_chain(mem->decomp_bufs[i],
 					next_seg) < 0) {
@@ -447,16 +482,26 @@
 			cmz++;
 		}
 
-		data_addr = (uint8_t *) rte_pktmbuf_append(
-					mem->comp_bufs[i],
-					test_data->out_seg_sz);
+		if (decompress_only)
+			data_sz = RTE_MIN(remaining_data_decomp, test_data->seg_sz);
+		else
+			data_sz = test_data->out_seg_sz;
+
+		data_addr = (uint8_t *) rte_pktmbuf_append(mem->comp_bufs[i],
+							   data_sz);
 		if (data_addr == NULL) {
 			RTE_LOG(ERR, USER1, "Could not append data\n");
 			return -1;
 		}
 
+		if (decompress_only) {
+			rte_memcpy(data_addr, input_data_ptr, data_sz);
+			input_data_ptr += data_sz;
+			remaining_data_decomp -= data_sz;
+		}
+
 		/* Chain mbufs if needed for output mbufs */
-		for (j = 1; j < segs_per_mbuf; j++) {
+		for (j = 1; j < segs_per_mbuf && remaining_data_decomp > 0; j++) {
 			struct rte_mbuf *next_seg =
 				rte_pktmbuf_alloc(mem->comp_buf_pool);
 
@@ -476,13 +521,25 @@
 				cmz++;
 			}
 
+			if (decompress_only)
+				data_sz = RTE_MIN(remaining_data_decomp,
+						  test_data->seg_sz);
+			else
+				data_sz = test_data->out_seg_sz;
+
 			data_addr = (uint8_t *)rte_pktmbuf_append(next_seg,
-				test_data->out_seg_sz);
+								  data_sz);
 			if (data_addr == NULL) {
 				RTE_LOG(ERR, USER1, "Could not append data\n");
 				return -1;
 			}
 
+			if (decompress_only) {
+				rte_memcpy(data_addr, input_data_ptr, data_sz);
+				input_data_ptr += data_sz;
+				remaining_data_decomp -= data_sz;
+			}
+
 			if (rte_pktmbuf_chain(mem->comp_bufs[i],
 					next_seg) < 0) {
 				RTE_LOG(ERR, USER1, "Could not chain mbufs\n");
diff -Nru dpdk-20.11.7/app/test-compress-perf/comp_perf_test_cyclecount.c dpdk-20.11.8/app/test-compress-perf/comp_perf_test_cyclecount.c
--- dpdk-20.11.7/app/test-compress-perf/comp_perf_test_cyclecount.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/app/test-compress-perf/comp_perf_test_cyclecount.c	2023-04-27 18:57:22.000000000 +0100
@@ -510,38 +510,55 @@
 	if (cperf_verify_test_runner(&ctx->ver))
 		return EXIT_FAILURE;
 
-	/*
-	 * Run the tests twice, discarding the first performance
-	 * results, before the cache is warmed up
-	 */
-
-	/* C O M P R E S S */
-	for (i = 0; i < 2; i++) {
-		if (main_loop(ctx, RTE_COMP_COMPRESS) < 0)
-			return EXIT_FAILURE;
-	}
-
-	ops_enq_retries_comp = ctx->ops_enq_retries;
-	ops_deq_retries_comp = ctx->ops_deq_retries;
-
-	duration_enq_per_op_comp = ctx->duration_enq /
-			(ctx->ver.mem.total_bufs * test_data->num_iter);
-	duration_deq_per_op_comp = ctx->duration_deq /
-			(ctx->ver.mem.total_bufs * test_data->num_iter);
+	if (test_data->test_op & COMPRESS) {
+		/*
+		 * Run the test twice, discarding the first performance
+		 * results, before the cache is warmed up
+		 */
+		for (i = 0; i < 2; i++) {
+			if (main_loop(ctx, RTE_COMP_COMPRESS) < 0)
+				return EXIT_FAILURE;
+		}
+
+		ops_enq_retries_comp = ctx->ops_enq_retries;
+		ops_deq_retries_comp = ctx->ops_deq_retries;
+
+		duration_enq_per_op_comp = ctx->duration_enq /
+				(ctx->ver.mem.total_bufs * test_data->num_iter);
+		duration_deq_per_op_comp = ctx->duration_deq /
+				(ctx->ver.mem.total_bufs * test_data->num_iter);
+	} else {
+		ops_enq_retries_comp = 0;
+		ops_deq_retries_comp = 0;
 
-	/* D E C O M P R E S S */
-	for (i = 0; i < 2; i++) {
-		if (main_loop(ctx, RTE_COMP_DECOMPRESS) < 0)
-			return EXIT_FAILURE;
+		duration_enq_per_op_comp = 0;
+		duration_deq_per_op_comp = 0;
 	}
 
-	ops_enq_retries_decomp = ctx->ops_enq_retries;
-	ops_deq_retries_decomp = ctx->ops_deq_retries;
+	if (test_data->test_op & DECOMPRESS) {
+		/*
+		 * Run the test twice, discarding the first performance
+		 * results, before the cache is warmed up
+		 */
+		for (i = 0; i < 2; i++) {
+			if (main_loop(ctx, RTE_COMP_DECOMPRESS) < 0)
+				return EXIT_FAILURE;
+		}
+
+		ops_enq_retries_decomp = ctx->ops_enq_retries;
+		ops_deq_retries_decomp = ctx->ops_deq_retries;
+
+		duration_enq_per_op_decomp = ctx->duration_enq /
+				(ctx->ver.mem.total_bufs * test_data->num_iter);
+		duration_deq_per_op_decomp = ctx->duration_deq /
+				(ctx->ver.mem.total_bufs * test_data->num_iter);
+	} else {
+		ops_enq_retries_decomp = 0;
+		ops_deq_retries_decomp = 0;
 
-	duration_enq_per_op_decomp = ctx->duration_enq /
-			(ctx->ver.mem.total_bufs * test_data->num_iter);
-	duration_deq_per_op_decomp = ctx->duration_deq /
-			(ctx->ver.mem.total_bufs * test_data->num_iter);
+		duration_enq_per_op_decomp = 0;
+		duration_deq_per_op_decomp = 0;
+	}
 
 	duration_setup_per_op = ctx->duration_op /
 			(ctx->ver.mem.total_bufs * test_data->num_iter);
@@ -558,7 +575,7 @@
 		"    [D-e] - decompression enqueue\n"
 		"    [D-d] - decompression dequeue\n"
 		"  - Cycles section: number of cycles per 'op' for the following operations:\n"
-		"    setup/op - memory allocation, op configuration and memory dealocation\n"
+		"    setup/op - memory allocation, op configuration and memory deallocation\n"
 		"    [C-e] - compression enqueue\n"
 		"    [C-d] - compression dequeue\n"
 		"    [D-e] - decompression enqueue\n"
diff -Nru dpdk-20.11.7/app/test-compress-perf/comp_perf_test_throughput.c dpdk-20.11.8/app/test-compress-perf/comp_perf_test_throughput.c
--- dpdk-20.11.7/app/test-compress-perf/comp_perf_test_throughput.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/app/test-compress-perf/comp_perf_test_throughput.c	2023-04-27 18:57:22.000000000 +0100
@@ -355,41 +355,53 @@
 	 * First the verification part is needed
 	 */
 	if (cperf_verify_test_runner(&ctx->ver)) {
-		ret =  EXIT_FAILURE;
+		ret = EXIT_FAILURE;
 		goto end;
 	}
 
-	/*
-	 * Run the tests twice, discarding the first performance
-	 * results, before the cache is warmed up
-	 */
-	for (i = 0; i < 2; i++) {
-		if (main_loop(ctx, RTE_COMP_COMPRESS) < 0) {
-			ret = EXIT_FAILURE;
-			goto end;
+	if (test_data->test_op & COMPRESS) {
+		/*
+		 * Run the test twice, discarding the first performance
+		 * results, before the cache is warmed up
+		 */
+		for (i = 0; i < 2; i++) {
+			if (main_loop(ctx, RTE_COMP_COMPRESS) < 0) {
+				ret = EXIT_FAILURE;
+				goto end;
+			}
 		}
-	}
 
-	for (i = 0; i < 2; i++) {
-		if (main_loop(ctx, RTE_COMP_DECOMPRESS) < 0) {
-			ret = EXIT_FAILURE;
-			goto end;
-		}
+		ctx->comp_tsc_byte =
+			(double)(ctx->comp_tsc_duration[test_data->level]) /
+						       test_data->input_data_sz;
+		ctx->comp_gbps = rte_get_tsc_hz() / ctx->comp_tsc_byte * 8 /
+								     1000000000;
+	} else {
+		ctx->comp_tsc_byte = 0;
+		ctx->comp_gbps = 0;
 	}
 
-	ctx->comp_tsc_byte =
-			(double)(ctx->comp_tsc_duration[test_data->level]) /
-					test_data->input_data_sz;
+	if (test_data->test_op & DECOMPRESS) {
+		/*
+		 * Run the test twice, discarding the first performance
+		 * results, before the cache is warmed up
+		 */
+		for (i = 0; i < 2; i++) {
+			if (main_loop(ctx, RTE_COMP_DECOMPRESS) < 0) {
+				ret = EXIT_FAILURE;
+				goto end;
+			}
+		}
 
-	ctx->decomp_tsc_byte =
+		ctx->decomp_tsc_byte =
 			(double)(ctx->decomp_tsc_duration[test_data->level]) /
-					test_data->input_data_sz;
-
-	ctx->comp_gbps = rte_get_tsc_hz() / ctx->comp_tsc_byte * 8 /
-			1000000000;
-
-	ctx->decomp_gbps = rte_get_tsc_hz() / ctx->decomp_tsc_byte * 8 /
-			1000000000;
+						       test_data->input_data_sz;
+		ctx->decomp_gbps = rte_get_tsc_hz() / ctx->decomp_tsc_byte * 8 /
+								     1000000000;
+	} else {
+		ctx->decomp_tsc_byte = 0;
+		ctx->decomp_gbps = 0;
+	}
 
 	if (rte_atomic16_test_and_set(&display_once)) {
 		printf("\n%12s%6s%12s%17s%15s%16s\n",
diff -Nru dpdk-20.11.7/app/test-compress-perf/comp_perf_test_verify.c dpdk-20.11.8/app/test-compress-perf/comp_perf_test_verify.c
--- dpdk-20.11.7/app/test-compress-perf/comp_perf_test_verify.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/app/test-compress-perf/comp_perf_test_verify.c	2023-04-27 18:57:22.000000000 +0100
@@ -112,7 +112,8 @@
 		output_data_sz = &ctx->decomp_data_sz;
 		input_bufs = mem->comp_bufs;
 		output_bufs = mem->decomp_bufs;
-		out_seg_sz = test_data->seg_sz;
+		out_seg_sz = (test_data->test_op & COMPRESS) ?
+			     test_data->seg_sz : test_data->out_seg_sz;
 	}
 
 	/* Create private xform */
@@ -224,7 +225,7 @@
 				  op->status ==
 				  RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE) {
 					RTE_LOG(ERR, USER1,
-"Out of space error occurred due to uncompressible input data expanding to larger than destination buffer. Increase the EXPANSE_RATIO constant to use this data.\n");
+"Out of space error occurred due to incompressible input data expanding to larger than destination buffer. Increase the EXPANSE_RATIO constant to use this data.\n");
 					res = -1;
 					goto end;
 				} else if (op->status !=
@@ -309,7 +310,7 @@
 				  op->status ==
 				  RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE) {
 					RTE_LOG(ERR, USER1,
-"Out of space error occurred due to uncompressible input data expanding to larger than destination buffer. Increase the EXPANSE_RATIO constant to use this data.\n");
+"Out of space error occurred due to incompressible input data expanding to larger than destination buffer. Increase the EXPANSE_RATIO constant to use this data.\n");
 					res = -1;
 					goto end;
 				} else if (op->status !=
@@ -395,32 +396,47 @@
 
 	test_data->ratio = 0;
 
-	if (main_loop(ctx, RTE_COMP_COMPRESS) < 0) {
-		ret = EXIT_FAILURE;
-		goto end;
+	if (test_data->test_op & COMPRESS) {
+		if (main_loop(ctx, RTE_COMP_COMPRESS) < 0) {
+			ret = EXIT_FAILURE;
+			goto end;
+		}
 	}
 
-	if (main_loop(ctx, RTE_COMP_DECOMPRESS) < 0) {
-		ret = EXIT_FAILURE;
-		goto end;
-	}
+	if (test_data->test_op & DECOMPRESS) {
+		if (main_loop(ctx, RTE_COMP_DECOMPRESS) < 0) {
+			ret = EXIT_FAILURE;
+			goto end;
+		}
 
-	if (ctx->decomp_data_sz != test_data->input_data_sz) {
-		RTE_LOG(ERR, USER1,
-	   "Decompressed data length not equal to input data length\n");
-		RTE_LOG(ERR, USER1,
-			"Decompressed size = %zu, expected = %zu\n",
-			ctx->decomp_data_sz, test_data->input_data_sz);
-		ret = EXIT_FAILURE;
-		goto end;
-	} else {
-		if (memcmp(ctx->mem.decompressed_data,
-				test_data->input_data,
-				test_data->input_data_sz) != 0) {
+		if (!(test_data->test_op & COMPRESS)) {
+			/*
+			 * For DECOMPRESS_ONLY mode there is no more
+			 * verifications, reset the 'ratio' and 'comp_data_sz'
+			 * fields for other tests report.
+			 */
+			ctx->comp_data_sz = 0;
+			ctx->ratio = 0;
+			goto end;
+		}
+
+		if (ctx->decomp_data_sz != test_data->input_data_sz) {
+			RTE_LOG(ERR, USER1,
+				"Decompressed data length not equal to input data length\n");
 			RTE_LOG(ERR, USER1,
-		    "Decompressed data is not the same as file data\n");
+				"Decompressed size = %zu, expected = %zu\n",
+				ctx->decomp_data_sz, test_data->input_data_sz);
 			ret = EXIT_FAILURE;
 			goto end;
+		} else {
+			if (memcmp(ctx->mem.decompressed_data,
+					test_data->input_data,
+					test_data->input_data_sz) != 0) {
+				RTE_LOG(ERR, USER1,
+					"Decompressed data is not the same as file data\n");
+				ret = EXIT_FAILURE;
+				goto end;
+			}
 		}
 	}
 
diff -Nru dpdk-20.11.7/app/test-compress-perf/main.c dpdk-20.11.8/app/test-compress-perf/main.c
--- dpdk-20.11.7/app/test-compress-perf/main.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/app/test-compress-perf/main.c	2023-04-27 18:57:22.000000000 +0100
@@ -253,6 +253,14 @@
 		goto end;
 	}
 
+	if (!(test_data->test_op & COMPRESS) &&
+	    test_data->input_data_sz >
+	    (size_t) test_data->seg_sz * (size_t) test_data->max_sgl_segs) {
+		RTE_LOG(ERR, USER1,
+			"Size of input must be less than total segments\n");
+		goto end;
+	}
+
 	test_data->input_data = rte_zmalloc_socket(NULL,
 				test_data->input_data_sz, 0, rte_socket_id());
 
diff -Nru dpdk-20.11.7/app/test-crypto-perf/cperf_options_parsing.c dpdk-20.11.8/app/test-crypto-perf/cperf_options_parsing.c
--- dpdk-20.11.7/app/test-crypto-perf/cperf_options_parsing.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/app/test-crypto-perf/cperf_options_parsing.c	2023-04-27 18:57:22.000000000 +0100
@@ -496,6 +496,7 @@
 	if (access(opts->test_file, F_OK) != -1)
 		return 0;
 	RTE_LOG(ERR, USER1, "Test vector file doesn't exist\n");
+	free(opts->test_file);
 
 	return -1;
 }
diff -Nru dpdk-20.11.7/app/test-crypto-perf/cperf_test_common.c dpdk-20.11.8/app/test-crypto-perf/cperf_test_common.c
--- dpdk-20.11.7/app/test-crypto-perf/cperf_test_common.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/app/test-crypto-perf/cperf_test_common.c	2023-04-27 18:57:22.000000000 +0100
@@ -166,9 +166,11 @@
 				RTE_CACHE_LINE_ROUNDUP(crypto_op_total_size);
 	uint32_t mbuf_size = sizeof(struct rte_mbuf) + options->segment_sz;
 	uint32_t max_size = options->max_buffer_size + options->digest_sz;
-	uint16_t segments_nb = (max_size % options->segment_sz) ?
-			(max_size / options->segment_sz) + 1 :
-			max_size / options->segment_sz;
+	uint32_t segment_data_len = options->segment_sz - options->headroom_sz -
+				    options->tailroom_sz;
+	uint16_t segments_nb = (max_size % segment_data_len) ?
+				(max_size / segment_data_len) + 1 :
+				(max_size / segment_data_len);
 	uint32_t obj_size = crypto_op_total_size_padded +
 				(mbuf_size * segments_nb);
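
The fix subtracts headroom and tailroom from segment_sz before
dividing, since those reserved bytes cannot hold payload; otherwise
too few segments are allocated. A worked example (illustrative; the
patch's (a % b) ? a/b + 1 : a/b form is the same ceiling division
written as (a + b - 1) / b below):

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t segments_nb(uint32_t max_size, uint32_t segment_sz,
                                uint32_t headroom_sz, uint32_t tailroom_sz)
    {
            uint32_t segment_data_len =
                    segment_sz - headroom_sz - tailroom_sz;

            /* ceiling division: segments needed for max_size bytes */
            return (max_size + segment_data_len - 1) / segment_data_len;
    }

    int main(void)
    {
            /* 2048-byte buffer, 512-byte segments, 64 B headroom and
             * 64 B tailroom: 384 usable bytes per segment, so
             * ceil(2048 / 384) = 6 segments, where the old
             * computation ceil(2048 / 512) = 4 under-allocated. */
            printf("%u segments\n",
                   (unsigned)segments_nb(2048, 512, 64, 64));
            return 0;
    }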
 
diff -Nru dpdk-20.11.7/app/test-crypto-perf/cperf_test_vector_parsing.c dpdk-20.11.8/app/test-crypto-perf/cperf_test_vector_parsing.c
--- dpdk-20.11.7/app/test-crypto-perf/cperf_test_vector_parsing.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/app/test-crypto-perf/cperf_test_vector_parsing.c	2023-04-27 18:57:22.000000000 +0100
@@ -28,6 +28,7 @@
 		rte_free(vector->cipher_key.data);
 		rte_free(vector->auth_key.data);
 		rte_free(vector->ciphertext.data);
+		free(opts->test_file);
 	}
 
 	rte_free(vector);
diff -Nru dpdk-20.11.7/app/test-flow-perf/main.c dpdk-20.11.8/app/test-flow-perf/main.c
--- dpdk-20.11.7/app/test-flow-perf/main.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/app/test-flow-perf/main.c	2023-04-27 18:57:22.000000000 +0100
@@ -709,13 +709,11 @@
 			if (strcmp(lgopts[opt_idx].name,
 					"rules-batch") == 0) {
 				n = atoi(optarg);
-				if (n >= DEFAULT_RULES_BATCH)
+				if (n > 0)
 					rules_batch = n;
-				else {
+				else
 					rte_exit(EXIT_FAILURE,
-						"rules_batch should be >= %d\n",
-						DEFAULT_RULES_BATCH);
-				}
+							"flow rules-batch should be > 0\n");
 			}
 			if (strcmp(lgopts[opt_idx].name,
 					"rules-count") == 0) {
diff -Nru dpdk-20.11.7/app/test-pmd/cmdline.c dpdk-20.11.8/app/test-pmd/cmdline.c
--- dpdk-20.11.7/app/test-pmd/cmdline.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/app/test-pmd/cmdline.c	2023-04-27 18:57:22.000000000 +0100
@@ -71,9 +71,8 @@
 #include "cmdline_tm.h"
 #include "bpf_cmd.h"
 
-static struct cmdline *testpmd_cl;
-
 static void cmd_reconfig_device_queue(portid_t id, uint8_t dev, uint8_t queue);
+static struct cmdline *testpmd_cl;
 
 /* *** Help command with introduction. *** */
 struct cmd_help_brief_result {
@@ -17195,35 +17194,28 @@
 	printf("Read CLI commands from %s\n", filename);
 }
 
+void
+prompt_exit(void)
+{
+	cmdline_quit(testpmd_cl);
+}
+
 /* prompt function, called from main on MAIN lcore */
 void
 prompt(void)
 {
-	int ret;
-	/* initialize non-constant commands */
 	cmd_set_fwd_mode_init();
 	cmd_set_fwd_retry_mode_init();
 
 	testpmd_cl = cmdline_stdin_new(main_ctx, "testpmd> ");
-	if (testpmd_cl == NULL)
+	if (testpmd_cl == NULL) {
+		fprintf(stderr,
+			"Failed to create stdin based cmdline context\n");
 		return;
-
-	ret = atexit(prompt_exit);
-	if (ret != 0)
-		printf("Cannot set exit function for cmdline\n");
+	}
 
 	cmdline_interact(testpmd_cl);
-	if (ret != 0)
-		cmdline_stdin_exit(testpmd_cl);
-}
-
-void
-prompt_exit(void)
-{
-	if (testpmd_cl != NULL) {
-		cmdline_quit(testpmd_cl);
-		cmdline_stdin_exit(testpmd_cl);
-	}
+	cmdline_stdin_exit(testpmd_cl);
 }
 
 static void
diff -Nru dpdk-20.11.7/app/test-pmd/cmdline_flow.c dpdk-20.11.8/app/test-pmd/cmdline_flow.c
--- dpdk-20.11.7/app/test-pmd/cmdline_flow.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/app/test-pmd/cmdline_flow.c	2023-04-27 18:57:22.000000000 +0100
@@ -5281,15 +5281,15 @@
 	       l2_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
 	memcpy(eth.src.addr_bytes,
 	       l2_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
-	memcpy(header, &eth, sizeof(eth));
-	header += sizeof(eth);
+	memcpy(header, &eth, sizeof(struct rte_ether_hdr));
+	header += sizeof(struct rte_ether_hdr);
 	if (l2_encap_conf.select_vlan) {
 		if (l2_encap_conf.select_ipv4)
 			vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
 		else
 			vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
-		memcpy(header, &vlan, sizeof(vlan));
-		header += sizeof(vlan);
+		memcpy(header, &vlan, sizeof(struct rte_vlan_hdr));
+		header += sizeof(struct rte_vlan_hdr);
 	}
 	action_encap_data->conf.size = header -
 		action_encap_data->data;
@@ -5337,11 +5337,11 @@
 	header = action_decap_data->data;
 	if (l2_decap_conf.select_vlan)
 		eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
-	memcpy(header, &eth, sizeof(eth));
-	header += sizeof(eth);
+	memcpy(header, &eth, sizeof(struct rte_ether_hdr));
+	header += sizeof(struct rte_ether_hdr);
 	if (l2_decap_conf.select_vlan) {
-		memcpy(header, &vlan, sizeof(vlan));
-		header += sizeof(vlan);
+		memcpy(header, &vlan, sizeof(struct rte_vlan_hdr));
+		header += sizeof(struct rte_vlan_hdr);
 	}
 	action_decap_data->conf.size = header -
 		action_decap_data->data;
@@ -5421,15 +5421,15 @@
 	       mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
 	memcpy(eth.src.addr_bytes,
 	       mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
-	memcpy(header, &eth, sizeof(eth));
-	header += sizeof(eth);
+	memcpy(header, &eth, sizeof(struct rte_ether_hdr));
+	header += sizeof(struct rte_ether_hdr);
 	if (mplsogre_encap_conf.select_vlan) {
 		if (mplsogre_encap_conf.select_ipv4)
 			vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
 		else
 			vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
-		memcpy(header, &vlan, sizeof(vlan));
-		header += sizeof(vlan);
+		memcpy(header, &vlan, sizeof(struct rte_vlan_hdr));
+		header += sizeof(struct rte_vlan_hdr);
 	}
 	if (mplsogre_encap_conf.select_ipv4) {
 		memcpy(header, &ipv4, sizeof(ipv4));
@@ -5516,15 +5516,15 @@
 	       mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
 	memcpy(eth.src.addr_bytes,
 	       mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
-	memcpy(header, &eth, sizeof(eth));
-	header += sizeof(eth);
+	memcpy(header, &eth, sizeof(struct rte_ether_hdr));
+	header += sizeof(struct rte_ether_hdr);
 	if (mplsogre_encap_conf.select_vlan) {
 		if (mplsogre_encap_conf.select_ipv4)
 			vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
 		else
 			vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
-		memcpy(header, &vlan, sizeof(vlan));
-		header += sizeof(vlan);
+		memcpy(header, &vlan, sizeof(struct rte_vlan_hdr));
+		header += sizeof(struct rte_vlan_hdr);
 	}
 	if (mplsogre_encap_conf.select_ipv4) {
 		memcpy(header, &ipv4, sizeof(ipv4));
@@ -5615,15 +5615,15 @@
 	       mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
 	memcpy(eth.src.addr_bytes,
 	       mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
-	memcpy(header, &eth, sizeof(eth));
-	header += sizeof(eth);
+	memcpy(header, &eth, sizeof(struct rte_ether_hdr));
+	header += sizeof(struct rte_ether_hdr);
 	if (mplsoudp_encap_conf.select_vlan) {
 		if (mplsoudp_encap_conf.select_ipv4)
 			vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
 		else
 			vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
-		memcpy(header, &vlan, sizeof(vlan));
-		header += sizeof(vlan);
+		memcpy(header, &vlan, sizeof(struct rte_vlan_hdr));
+		header += sizeof(struct rte_vlan_hdr);
 	}
 	if (mplsoudp_encap_conf.select_ipv4) {
 		memcpy(header, &ipv4, sizeof(ipv4));
@@ -5712,15 +5712,15 @@
 	       mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
 	memcpy(eth.src.addr_bytes,
 	       mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
-	memcpy(header, &eth, sizeof(eth));
-	header += sizeof(eth);
+	memcpy(header, &eth, sizeof(struct rte_ether_hdr));
+	header += sizeof(struct rte_ether_hdr);
 	if (mplsoudp_encap_conf.select_vlan) {
 		if (mplsoudp_encap_conf.select_ipv4)
 			vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
 		else
 			vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
-		memcpy(header, &vlan, sizeof(vlan));
-		header += sizeof(vlan);
+		memcpy(header, &vlan, sizeof(struct rte_vlan_hdr));
+		header += sizeof(struct rte_vlan_hdr);
 	}
 	if (mplsoudp_encap_conf.select_ipv4) {
 		memcpy(header, &ipv4, sizeof(ipv4));
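
The sizeof changes above look like no-ops but are not: the local eth
and vlan variables are rte_flow_item_* structures, which in this
release appear to carry extra match metadata beyond the raw header
fields, so sizeof(eth) copies more than the 14 wire bytes and advances
the header cursor too far. A simplified illustration of the size
mismatch (hypothetical layouts modelled on the 20.11 flow-item
definitions, not the real structs):

    #include <stdint.h>
    #include <stdio.h>

    struct ether_hdr {                  /* 14-byte wire header */
            uint8_t dst[6];
            uint8_t src[6];
            uint16_t type;
    };

    struct flow_item_eth {              /* wire fields + match metadata */
            uint8_t dst[6];
            uint8_t src[6];
            uint16_t type;
            uint32_t has_vlan:1;
            uint32_t reserved:31;
    };

    int main(void)
    {
            /* memcpy(header, &eth, sizeof(eth)) with a flow-item
             * variable copies the metadata too; copying
             * sizeof(struct ether_hdr) emits exactly the wire bytes. */
            printf("wire: %zu, flow item: %zu\n",
                   sizeof(struct ether_hdr), sizeof(struct flow_item_eth));
            return 0;
    }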
diff -Nru dpdk-20.11.7/app/test-pmd/csumonly.c dpdk-20.11.8/app/test-pmd/csumonly.c
--- dpdk-20.11.7/app/test-pmd/csumonly.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/app/test-pmd/csumonly.c	2023-04-27 18:57:22.000000000 +0100
@@ -1133,9 +1133,12 @@
 
 	nb_prep = rte_eth_tx_prepare(fs->tx_port, fs->tx_queue,
 			tx_pkts_burst, nb_rx);
-	if (nb_prep != nb_rx)
+	if (nb_prep != nb_rx) {
 		printf("Preparing packet burst to transmit failed: %s\n",
 				rte_strerror(rte_errno));
+		fs->fwd_dropped += (nb_rx - nb_prep);
+		rte_pktmbuf_free_bulk(&tx_pkts_burst[nb_prep], nb_rx - nb_prep);
+	}
 
 	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, tx_pkts_burst,
 			nb_prep);
@@ -1143,12 +1146,12 @@
 	/*
 	 * Retry if necessary
 	 */
-	if (unlikely(nb_tx < nb_rx) && fs->retry_enabled) {
+	if (unlikely(nb_tx < nb_prep) && fs->retry_enabled) {
 		retry = 0;
-		while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) {
+		while (nb_tx < nb_prep && retry++ < burst_tx_retry_num) {
 			rte_delay_us(burst_tx_delay_time);
 			nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
-					&tx_pkts_burst[nb_tx], nb_rx - nb_tx);
+					&tx_pkts_burst[nb_tx], nb_prep - nb_tx);
 		}
 	}
 	fs->tx_packets += nb_tx;
@@ -1157,11 +1160,11 @@
 	fs->rx_bad_outer_l4_csum += rx_bad_outer_l4_csum;
 
 	inc_tx_burst_stats(fs, nb_tx);
-	if (unlikely(nb_tx < nb_rx)) {
-		fs->fwd_dropped += (nb_rx - nb_tx);
+	if (unlikely(nb_tx < nb_prep)) {
+		fs->fwd_dropped += (nb_prep - nb_tx);
 		do {
 			rte_pktmbuf_free(tx_pkts_burst[nb_tx]);
-		} while (++nb_tx < nb_rx);
+		} while (++nb_tx < nb_prep);
 	}
 
 	get_end_cycles(fs, start_tsc);
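
The csumonly hunks fix two related problems: mbufs rejected by
rte_eth_tx_prepare() were neither freed nor counted as dropped, and
the retry/drop logic compared against nb_rx even though only nb_prep
packets were handed to rte_eth_tx_burst(). The corrected flow,
condensed into a free-standing helper (a sketch using the same DPDK
calls as the hunk; MAX_RETRIES and the accounting variable are made up
here):

    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    #define MAX_RETRIES 64 /* stand-in for burst_tx_retry_num */

    static uint64_t
    tx_with_prepare(uint16_t port, uint16_t queue,
                    struct rte_mbuf **pkts, uint16_t nb_rx)
    {
            uint64_t dropped = 0;
            uint16_t nb_prep, nb_tx, retry = 0;

            /* packets rejected by tx_prepare are dropped and freed */
            nb_prep = rte_eth_tx_prepare(port, queue, pkts, nb_rx);
            if (nb_prep != nb_rx) {
                    dropped += nb_rx - nb_prep;
                    rte_pktmbuf_free_bulk(&pkts[nb_prep],
                                          nb_rx - nb_prep);
            }

            /* retries now chase nb_prep, not nb_rx */
            nb_tx = rte_eth_tx_burst(port, queue, pkts, nb_prep);
            while (nb_tx < nb_prep && retry++ < MAX_RETRIES)
                    nb_tx += rte_eth_tx_burst(port, queue,
                                              &pkts[nb_tx],
                                              nb_prep - nb_tx);

            /* whatever is still unsent after retrying is dropped too */
            dropped += nb_prep - nb_tx;
            while (nb_tx < nb_prep)
                    rte_pktmbuf_free(pkts[nb_tx++]);

            return dropped;
    }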
diff -Nru dpdk-20.11.7/app/test-pmd/ieee1588fwd.c dpdk-20.11.8/app/test-pmd/ieee1588fwd.c
--- dpdk-20.11.7/app/test-pmd/ieee1588fwd.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/app/test-pmd/ieee1588fwd.c	2023-04-27 18:57:22.000000000 +0100
@@ -184,13 +184,13 @@
 
 	/* Forward PTP packet with hardware TX timestamp */
 	mb->ol_flags |= PKT_TX_IEEE1588_TMST;
-	fs->tx_packets += 1;
 	if (rte_eth_tx_burst(fs->rx_port, fs->tx_queue, &mb, 1) == 0) {
 		printf("Port %u sent PTP packet dropped\n", fs->rx_port);
 		fs->fwd_dropped += 1;
 		rte_pktmbuf_free(mb);
 		return;
 	}
+	fs->tx_packets += 1;
 
 	/*
 	 * Check the TX timestamp.
diff -Nru dpdk-20.11.7/app/test-pmd/noisy_vnf.c dpdk-20.11.8/app/test-pmd/noisy_vnf.c
--- dpdk-20.11.7/app/test-pmd/noisy_vnf.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/app/test-pmd/noisy_vnf.c	2023-04-27 18:57:22.000000000 +0100
@@ -213,9 +213,10 @@
 		sent = rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
 					 tmp_pkts, nb_deqd);
 		if (unlikely(sent < nb_deqd) && fs->retry_enabled)
-			nb_tx += do_retry(nb_rx, nb_tx, tmp_pkts, fs);
-		inc_tx_burst_stats(fs, nb_tx);
+			sent += do_retry(nb_deqd, sent, tmp_pkts, fs);
+		inc_tx_burst_stats(fs, sent);
 		fs->fwd_dropped += drop_pkts(tmp_pkts, nb_deqd, sent);
+		nb_tx += sent;
 		ncf->prev_time = rte_get_timer_cycles();
 	}
 }
diff -Nru dpdk-20.11.7/app/test-pmd/testpmd.c dpdk-20.11.8/app/test-pmd/testpmd.c
--- dpdk-20.11.7/app/test-pmd/testpmd.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/app/test-pmd/testpmd.c	2023-04-27 18:57:22.000000000 +0100
@@ -10,6 +10,7 @@
 #include <time.h>
 #include <fcntl.h>
 #include <sys/mman.h>
+#include <sys/select.h>
 #include <sys/types.h>
 #include <errno.h>
 #include <stdbool.h>
@@ -216,7 +217,7 @@
  * In container, it cannot terminate the process which running with 'stats-period'
  * option. Set flag to exit stats period loop after received SIGINT/SIGTERM.
  */
-static volatile uint8_t f_quit;
+volatile uint8_t f_quit;
 uint8_t cl_quit; /* Quit testpmd from cmdline. */
 
 /*
@@ -1845,6 +1846,8 @@
 			fwd_cycles += fs->core_cycles;
 	}
 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
+		uint64_t tx_dropped = 0;
+
 		pt_id = fwd_ports_ids[i];
 		port = &ports[pt_id];
 
@@ -1866,8 +1869,9 @@
 		total_recv += stats.ipackets;
 		total_xmit += stats.opackets;
 		total_rx_dropped += stats.imissed;
-		total_tx_dropped += ports_stats[pt_id].tx_dropped;
-		total_tx_dropped += stats.oerrors;
+		tx_dropped += ports_stats[pt_id].tx_dropped;
+		tx_dropped += stats.oerrors;
+		total_tx_dropped += tx_dropped;
 		total_rx_nombuf  += stats.rx_nombuf;
 
 		printf("\n  %s Forward statistics for port %-2d %s\n",
@@ -1891,8 +1895,8 @@
 
 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64
 		       "TX-total: %-"PRIu64"\n",
-		       stats.opackets, ports_stats[pt_id].tx_dropped,
-		       stats.opackets + ports_stats[pt_id].tx_dropped);
+		       stats.opackets, tx_dropped,
+		       stats.opackets + tx_dropped);
 
 		if (record_burst_stats) {
 			if (ports_stats[pt_id].rx_stream)
@@ -3824,13 +3828,6 @@
 }
 
 static void
-force_quit(void)
-{
-	pmd_test_exit();
-	prompt_exit();
-}
-
-static void
 print_stats(void)
 {
 	uint8_t i;
@@ -3848,26 +3845,10 @@
 }
 
 static void
-signal_handler(int signum)
+signal_handler(int signum __rte_unused)
 {
-	if (signum == SIGINT || signum == SIGTERM) {
-		printf("\nSignal %d received, preparing to exit...\n",
-				signum);
-#ifdef RTE_LIB_PDUMP
-		/* uninitialize packet capture framework */
-		rte_pdump_uninit();
-#endif
-#ifdef RTE_LIB_LATENCYSTATS
-		if (latencystats_enabled != 0)
-			rte_latencystats_uninit();
-#endif
-		force_quit();
-		/* Set flag to indicate the force termination. */
-		f_quit = 1;
-		/* exit with the expected status */
-		signal(signum, SIG_DFL);
-		kill(getpid(), signum);
-	}
+	f_quit = 1;
+	prompt_exit();
 }
 
 int
@@ -3878,8 +3859,18 @@
 	uint16_t count;
 	int ret;
 
+#ifdef RTE_EXEC_ENV_WINDOWS
 	signal(SIGINT, signal_handler);
 	signal(SIGTERM, signal_handler);
+#else
+	/* Want read() not to be restarted on signal */
+	struct sigaction action = {
+		.sa_handler = signal_handler,
+	};
+
+	sigaction(SIGINT, &action, NULL);
+	sigaction(SIGTERM, &action, NULL);
+#endif
 
 	testpmd_logtype = rte_log_register("testpmd");
 	if (testpmd_logtype < 0)
@@ -3895,6 +3886,9 @@
 		rte_exit(EXIT_FAILURE,
 			 "Secondary process type not supported.\n");
 
+	/* allocate port structures, and init them */
+	init_port();
+
 	ret = register_eth_event_callback();
 	if (ret != 0)
 		rte_exit(EXIT_FAILURE, "Cannot register for ethdev events");
@@ -3913,9 +3907,6 @@
 	if (nb_ports == 0)
 		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
 
-	/* allocate port structures, and init them */
-	init_port();
-
 	set_def_fwd_config();
 	if (nb_lcores == 0)
 		rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n"
@@ -3990,8 +3981,13 @@
 		}
 	}
 
-	if (!no_device_start && start_port(RTE_PORT_ALL) != 0)
-		rte_exit(EXIT_FAILURE, "Start ports failed\n");
+	if (!no_device_start && start_port(RTE_PORT_ALL) != 0) {
+		if (!interactive) {
+			rte_eal_cleanup();
+			rte_exit(EXIT_FAILURE, "Start ports failed\n");
+		}
+		fprintf(stderr, "Start ports failed\n");
+	}
 
 	/* set all ports to promiscuous mode by default */
 	RTE_ETH_FOREACH_DEV(port_id) {
@@ -4036,15 +4032,9 @@
 			start_packet_forwarding(0);
 		}
 		prompt();
-		pmd_test_exit();
 	} else
 #endif
 	{
-		char c;
-		int rc;
-
-		f_quit = 0;
-
 		printf("No commandline core given, start packet forwarding\n");
 		start_packet_forwarding(tx_first);
 		if (stats_period != 0) {
@@ -4067,15 +4057,41 @@
 				prev_time = cur_time;
 				sleep(1);
 			}
-		}
+		} else {
+			char c;
+			fd_set fds;
+
+			printf("Press enter to exit\n");
 
-		printf("Press enter to exit\n");
-		rc = read(0, &c, 1);
-		pmd_test_exit();
-		if (rc < 0)
-			return 1;
+			FD_ZERO(&fds);
+			FD_SET(0, &fds);
+
+			/* wait for signal or enter */
+			ret = select(1, &fds, NULL, NULL, NULL);
+			if (ret < 0 && errno != EINTR)
+				rte_exit(EXIT_FAILURE,
+					 "Select failed: %s\n",
+					 strerror(errno));
+
+			/* if got enter then consume it */
+			if (ret == 1 && read(0, &c, 1) < 0)
+				rte_exit(EXIT_FAILURE,
+					 "Read failed: %s\n",
+					 strerror(errno));
+		}
 	}
 
+	pmd_test_exit();
+
+#ifdef RTE_LIB_PDUMP
+	/* uninitialize packet capture framework */
+	rte_pdump_uninit();
+#endif
+#ifdef RTE_LIB_LATENCYSTATS
+	if (latencystats_enabled != 0)
+		rte_latencystats_uninit();
+#endif
+
 	ret = rte_eal_cleanup();
 	if (ret != 0)
 		rte_exit(EXIT_FAILURE,
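
The testpmd rework above hinges on two POSIX details: registering the
handler with sigaction() and no SA_RESTART, so a pending SIGINT or
SIGTERM makes blocking calls fail with EINTR instead of being silently
restarted, and waiting in select() on stdin so either a signal or the
user pressing enter wakes the main loop. A standalone sketch of the
same pattern (plain POSIX, no DPDK, not part of the debdiff):

    #include <errno.h>
    #include <signal.h>
    #include <stdio.h>
    #include <sys/select.h>
    #include <unistd.h>

    static volatile sig_atomic_t f_quit;

    static void signal_handler(int signum)
    {
            (void)signum;
            f_quit = 1;
    }

    int main(void)
    {
            /* no SA_RESTART: select()/read() return -1/EINTR on signal */
            struct sigaction action = { .sa_handler = signal_handler };
            fd_set fds;
            int ret;
            char c;

            sigaction(SIGINT, &action, NULL);
            sigaction(SIGTERM, &action, NULL);

            printf("Press enter to exit\n");
            FD_ZERO(&fds);
            FD_SET(STDIN_FILENO, &fds);

            ret = select(STDIN_FILENO + 1, &fds, NULL, NULL, NULL);
            if (ret < 0 && errno != EINTR) {
                    perror("select");
                    return 1;
            }
            if (ret == 1 && read(STDIN_FILENO, &c, 1) < 0)
                    return 1; /* consume the enter keypress */

            if (f_quit)
                    printf("\nSignal received, exiting\n");
            return 0;
    }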
diff -Nru dpdk-20.11.7/app/test-pmd/testpmd.h dpdk-20.11.8/app/test-pmd/testpmd.h
--- dpdk-20.11.7/app/test-pmd/testpmd.h	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/app/test-pmd/testpmd.h	2023-04-27 18:57:22.000000000 +0100
@@ -25,6 +25,7 @@
 #define RTE_PORT_HANDLING       (uint16_t)3
 
 extern uint8_t cl_quit;
+extern volatile uint8_t f_quit;
 
 /*
  * It is used to allocate the memory for hash key.
diff -Nru dpdk-20.11.7/config/meson.build dpdk-20.11.8/config/meson.build
--- dpdk-20.11.7/config/meson.build	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/config/meson.build	2023-04-27 18:57:22.000000000 +0100
@@ -107,7 +107,7 @@
 
 toolchain = cc.get_id()
 dpdk_conf.set_quoted('RTE_TOOLCHAIN', toolchain)
-dpdk_conf.set('RTE_TOOLCHAIN_' + toolchain.to_upper(), 1)
+dpdk_conf.set('RTE_TOOLCHAIN_' + toolchain.to_upper().underscorify(), 1)
 
 dpdk_conf.set('RTE_ARCH_64', cc.sizeof('void *') == 8)
 dpdk_conf.set('RTE_ARCH_32', cc.sizeof('void *') == 4)
@@ -152,7 +152,7 @@
 endif
 
 has_libfdt = 0
-fdt_dep = cc.find_library('libfdt', required: false)
+fdt_dep = cc.find_library('fdt', required: false)
 if fdt_dep.found() and cc.has_header('fdt.h')
 	dpdk_conf.set10('RTE_HAS_LIBFDT', true)
 	has_libfdt = 1
@@ -160,7 +160,7 @@
 	dpdk_extra_ldflags += '-lfdt'
 endif
 
-libexecinfo = cc.find_library('libexecinfo', required: false)
+libexecinfo = cc.find_library('execinfo', required: false)
 if libexecinfo.found() and cc.has_header('execinfo.h')
 	add_project_link_arguments('-lexecinfo', language: 'c')
 	dpdk_extra_ldflags += '-lexecinfo'
diff -Nru dpdk-20.11.7/debian/changelog dpdk-20.11.8/debian/changelog
--- dpdk-20.11.7/debian/changelog	2022-12-14 00:32:18.000000000 +0000
+++ dpdk-20.11.8/debian/changelog	2023-04-28 12:27:52.000000000 +0100
@@ -1,3 +1,10 @@
+dpdk (20.11.8-1~deb11u1) bullseye; urgency=medium
+
+  * New upstream release 20.11.8; for a full list of changes see:
+    http://doc.dpdk.org/guides-20.11/rel_notes/release_20_11.html
+
+ -- Luca Boccassi <bluca@debian.org>  Fri, 28 Apr 2023 12:27:52 +0100
+
 dpdk (20.11.7-1~deb11u1) bullseye; urgency=medium
 
   * New upstream release 20.11.7; for a full list of changes see:
diff -Nru dpdk-20.11.7/devtools/check-git-log.sh dpdk-20.11.8/devtools/check-git-log.sh
--- dpdk-20.11.7/devtools/check-git-log.sh	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/devtools/check-git-log.sh	2023-04-27 18:57:22.000000000 +0100
@@ -113,7 +113,7 @@
 for word in $(cat $words); do
 	bad=$(echo "$headlines" | grep -iw $word | grep -vw $word)
 	if [ "$word" = "Tx" ]; then
-		bad=$(echo $bad | grep -v 'OCTEON\ TX')
+		bad=$(echo $bad | grep -v 'OCTEON TX')
 	fi
 	for bad_line in $bad; do
 		bad_word=$(echo $bad_line | cut -d":" -f2 | grep -iwo $word)
diff -Nru dpdk-20.11.7/doc/guides/linux_gsg/enable_func.rst dpdk-20.11.8/doc/guides/linux_gsg/enable_func.rst
--- dpdk-20.11.7/doc/guides/linux_gsg/enable_func.rst	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/doc/guides/linux_gsg/enable_func.rst	2023-04-27 18:57:22.000000000 +0100
@@ -106,12 +106,12 @@
 If the driver requires using physical addresses (PA),
 the executable file must be granted additional capabilities:
 
-* ``SYS_ADMIN`` to read ``/proc/self/pagemaps``
+* ``DAC_READ_SEARCH`` and ``SYS_ADMIN`` to read ``/proc/self/pagemaps``
 * ``IPC_LOCK`` to lock hugepages in memory
 
 .. code-block:: console
 
-   setcap cap_ipc_lock,cap_sys_admin+ep <executable>
+   setcap cap_dac_read_search,cap_ipc_lock,cap_sys_admin+ep <executable>
 
 If physical addresses are not accessible,
 the following message will appear during EAL initialization::
diff -Nru dpdk-20.11.7/doc/guides/prog_guide/cryptodev_lib.rst dpdk-20.11.8/doc/guides/prog_guide/cryptodev_lib.rst
--- dpdk-20.11.7/doc/guides/prog_guide/cryptodev_lib.rst	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/doc/guides/prog_guide/cryptodev_lib.rst	2023-04-27 18:57:22.000000000 +0100
@@ -98,14 +98,10 @@
 The ``rte_cryptodev_config`` structure is used to pass the configuration
 parameters for socket selection and number of queue pairs.
 
-.. code-block:: c
-
-    struct rte_cryptodev_config {
-        int socket_id;
-        /**< Socket to allocate resources on */
-        uint16_t nb_queue_pairs;
-        /**< Number of queue pairs to configure on device */
-    };
+.. literalinclude:: ../../../lib/librte_cryptodev/rte_cryptodev.h
+   :language: c
+   :start-after: Structure rte_cryptodev_config 8<
+   :end-before: >8 End of structure rte_cryptodev_config.
 
 
 Configuration of Queue Pairs
@@ -121,13 +117,11 @@
                 const struct rte_cryptodev_qp_conf *qp_conf,
                 int socket_id)
 
-   struct rte_cryptodev_qp_conf {
-        uint32_t nb_descriptors; /**< Number of descriptors per queue pair */
-        struct rte_mempool *mp_session;
-        /**< The mempool for creating session in sessionless mode */
-        struct rte_mempool *mp_session_private;
-        /**< The mempool for creating sess private data in sessionless mode */
-    };
+
+.. literalinclude:: ../../../lib/librte_cryptodev/rte_cryptodev.h
+   :language: c
+   :start-after: Structure rte_cryptodev_qp_conf 8<
+   :end-before: >8 End of structure rte_cryptodev_qp_conf.
 
 
 The fields ``mp_session`` and ``mp_session_private`` are used for creating
@@ -273,23 +267,10 @@
 features and capabilities. The ``rte_cryptodev_info`` structure contains all the
 relevant information for the device.
 
-.. code-block:: c
-
-    struct rte_cryptodev_info {
-        const char *driver_name;
-        uint8_t driver_id;
-        struct rte_device *device;
-
-        uint64_t feature_flags;
-
-        const struct rte_cryptodev_capabilities *capabilities;
-
-        unsigned max_nb_queue_pairs;
-
-        struct {
-            unsigned max_nb_sessions;
-        } sym;
-    };
+.. literalinclude:: ../../../lib/librte_cryptodev/rte_cryptodev.h
+   :language: c
+   :start-after: Structure rte_cryptodev_info 8<
+   :end-before: >8 End of structure rte_cryptodev_info.
 
 
 Operation Processing
@@ -506,22 +487,10 @@
 Also it is important to note that the order in which the
 transforms are passed indicates the order of the chaining.
 
-.. code-block:: c
-
-    struct rte_crypto_sym_xform {
-        struct rte_crypto_sym_xform *next;
-        /**< next xform in chain */
-        enum rte_crypto_sym_xform_type type;
-        /**< xform type */
-        union {
-            struct rte_crypto_auth_xform auth;
-            /**< Authentication / hash xform */
-            struct rte_crypto_cipher_xform cipher;
-            /**< Cipher xform */
-            struct rte_crypto_aead_xform aead;
-            /**< AEAD xform */
-        };
-    };
+.. literalinclude:: ../../../lib/librte_cryptodev/rte_crypto_sym.h
+   :language: c
+   :start-after: Structure rte_crypto_sym_xform 8<
+   :end-before: >8 End of structure rte_crypto_sym_xform.
 
 The API does not place a limit on the number of transforms that can be chained
 together but this will be limited by the underlying Crypto device poll mode
@@ -544,61 +513,11 @@
 specified in the session or the transform
 chain.
 
-.. code-block:: c
+.. literalinclude:: ../../../lib/librte_cryptodev/rte_crypto_sym.h
+   :language: c
+   :start-after: Structure rte_crypto_sym_op 8<
+   :end-before: >8 End of structure rte_crypto_sym_op.
 
-    struct rte_crypto_sym_op {
-        struct rte_mbuf *m_src;
-        struct rte_mbuf *m_dst;
-
-        union {
-            struct rte_cryptodev_sym_session *session;
-            /**< Handle for the initialised session context */
-            struct rte_crypto_sym_xform *xform;
-            /**< Session-less API Crypto operation parameters */
-        };
-
-        union {
-            struct {
-                struct {
-                    uint32_t offset;
-                    uint32_t length;
-                } data; /**< Data offsets and length for AEAD */
-
-                struct {
-                    uint8_t *data;
-                    rte_iova_t phys_addr;
-                } digest; /**< Digest parameters */
-
-                struct {
-                    uint8_t *data;
-                    rte_iova_t phys_addr;
-                } aad;
-                /**< Additional authentication parameters */
-            } aead;
-
-            struct {
-                struct {
-                    struct {
-                        uint32_t offset;
-                        uint32_t length;
-                    } data; /**< Data offsets and length for ciphering */
-                } cipher;
-
-                struct {
-                    struct {
-                        uint32_t offset;
-                        uint32_t length;
-                    } data;
-                    /**< Data offsets and length for authentication */
-
-                    struct {
-                        uint8_t *data;
-                        rte_iova_t phys_addr;
-                    } digest; /**< Digest parameters */
-                } auth;
-            };
-        };
-    };
 
 Synchronous mode
 ----------------
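
Note on the literalinclude conversion in this file: the directives pull the
struct definitions straight from the header, keyed on sentinel comments
placed around each definition. A minimal sketch of what such a marked-up
region looks like in rte_cryptodev.h (fields taken from the code block the
first hunk removes; the exact comment wrapper is assumed to carry the
start-after/end-before strings):

    /* Structure rte_cryptodev_config 8< */
    struct rte_cryptodev_config {
            int socket_id;
            /**< Socket to allocate resources on */
            uint16_t nb_queue_pairs;
            /**< Number of queue pairs to configure on device */
    };
    /* >8 End of structure rte_cryptodev_config. */

Sphinx extracts the snippet at build time, so the guide can no longer drift
out of sync with the header.
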
diff -Nru dpdk-20.11.7/doc/guides/prog_guide/event_timer_adapter.rst dpdk-20.11.8/doc/guides/prog_guide/event_timer_adapter.rst
--- dpdk-20.11.7/doc/guides/prog_guide/event_timer_adapter.rst	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/doc/guides/prog_guide/event_timer_adapter.rst	2023-04-27 18:57:22.000000000 +0100
@@ -35,7 +35,7 @@
 
 The Event Timer Adapter API represents each event timer with a generic struct,
 which contains an event and user metadata.  The ``rte_event_timer`` struct is
-defined in ``lib/librte_event/librte_event_timer_adapter.h``.
+defined in ``rte_event_timer_adapter.h``.
 
 .. _timer_expiry_event:
 
diff -Nru dpdk-20.11.7/doc/guides/rel_notes/release_20_11.rst dpdk-20.11.8/doc/guides/rel_notes/release_20_11.rst
--- dpdk-20.11.7/doc/guides/rel_notes/release_20_11.rst	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/doc/guides/rel_notes/release_20_11.rst	2023-04-27 18:57:22.000000000 +0100
@@ -3886,3 +3886,272 @@
 * net/iavf
 
    * vf_interrupt_pmd/nic_interrupt_VF_vfio_pci: l3fwd-power Wake up failed on X722 37d0
+
+20.11.8 Release Notes
+---------------------
+
+
+20.11.8 Fixes
+~~~~~~~~~~~~~
+
+* acl: fix crash on PPC64 with GCC 11
+* app/bbdev: add allocation checks
+* app/bbdev: check statistics failure
+* app/compress-perf: fix remaining data for ops
+* app/compress-perf: fix some typos
+* app/compress-perf: fix testing single operation
+* app/crypto-perf: fix number of segments
+* app/crypto-perf: fix test file memory leak
+* app/flow-perf: fix division or module by zero
+* app/testpmd: cleanup cleanly from signal
+* app/testpmd: fix crash on cleanup
+* app/testpmd: fix encap/decap size calculation
+* app/testpmd: fix forwarding stats for Tx dropped
+* app/testpmd: fix interactive mode on Windows
+* app/testpmd: fix interactive mode with no ports
+* app/testpmd: fix packet count in IEEE 1588 engine
+* app/testpmd: fix packet transmission in noisy VNF engine
+* app/testpmd: fix Tx preparation in checksum engine
+* baseband/acc: fix acc100 iteration counter in TB
+* baseband/acc: fix memory leak on acc100 close
+* build: fix dependencies lookup
+* build: fix toolchain definition
+* bus/ifpga: fix devargs handling
+* ci: switch to Ubuntu 20.04
+* cmdline: handle EOF as quit
+* common/mlx5: use just sufficient barrier for Arm
+* compressdev: fix empty devargs parsing
+* compressdev: fix end of driver list
+* crypto/ccp: remove some printf
+* cryptodev: fix empty devargs parsing
+* crypto/qat: fix stream cipher direction
+* crypto/snow3g: fix snow3g enqueue stat increment
+* devtools: fix escaped space in grep pattern
+* doc: add Linux capability to access physical addresses
+* doc: fix code blocks in cryptodev guide
+* doc: fix dependency setup in l2fwd-cat example guide
+* doc: fix pipeline example path in user guide
+* doc: fix reference to event timer header
+* eal/freebsd: fix lock in alarm callback
+* eal: use same atomic intrinsics for GCC and clang
+* eal/windows: fix pedantic build
+* eal/windows: mark memory config as complete
+* eventdev/eth_tx: fix devices loop
+* eventdev/timer: fix overflow
+* examples/cmdline: fix build with GCC 12
+* examples/qos_sched: fix config entries in wrong sections
+* examples/qos_sched: fix debug mode
+* examples/qos_sched: fix Tx port config when link down
+* examples/vm_power_manager: revert backported commit
+* fbarray: fix metadata dump
+* graph: fix node shrink
+* kni: fix build on RHEL 9.1
+* kni: fix possible starvation when mbufs are exhausted
+* kvargs: add API documentation for process callback
+* lib: remove empty return types from doxygen comments
+* mem: fix hugepage info mapping
+* mempool: fix get objects from mempool with cache
+* net/bnxt: fix link state change interrupt config
+* net/bnxt: fix RSS hash in mbuf
+* net/bnxt: fix Rx queue stats after queue stop and start
+* net/bnxt: fix Tx queue stats after queue stop and start
+* net/e1000: fix saving of stripped VLAN TCI
+* net/hns3: add verification of RSS types
+* net/hns3: allow adding queue buffer size hash rule
+* net/hns3: extract common functions to set Rx/Tx
+* net/hns3: fix burst mode query with dummy function
+* net/hns3: fix clearing RSS configuration
+* net/hns3: fix config struct used for conversion
+* net/hns3: fix duplicate RSS rule check
+* net/hns3: fix log about indirection table size
+* net/hns3: fix possible truncation of hash key when config
+* net/hns3: fix possible truncation of redirection table
+* net/hns3: fix RSS key size compatibility
+* net/hns3: fix warning on flush or destroy rule
+* net/hns3: refactor set RSS hash algorithm and key interface
+* net/hns3: reimplement hash flow function
+* net/hns3: remove useless code when destroy valid RSS rule
+* net/hns3: save hash algo to RSS filter list node
+* net/hns3: separate flow RSS config from RSS conf
+* net/hns3: separate setting and clearing RSS rule
+* net/hns3: separate setting hash algorithm
+* net/hns3: separate setting hash key
+* net/hns3: separate setting redirection table
+* net/hns3: separate setting RSS types
+* net/hns3: use hardware config to report hash key
+* net/hns3: use hardware config to report hash types
+* net/hns3: use hardware config to report redirection table
+* net/hns3: use new RSS rule to configure hardware
+* net/hns3: use RSS filter list to check duplicated rule
+* net/i40e: fix MAC loopback on X722
+* net/i40e: fix validation of flow transfer attribute
+* net/i40e: reduce interrupt interval in multi-driver mode
+* net/i40e: revert link status check on device start
+* net/iavf: add lock for VF commands
+* net/iavf: fix device stop during reset
+* net/iavf: protect insertion in flow list
+* net/ice: fix validation of flow transfer attribute
+* net/ipn3ke: fix representor name
+* net/ipn3ke: fix thread exit
+* net/ixgbe: enable IPv6 mask in flow rules
+* net/ixgbe: fix firmware version consistency
+* net/ixgbe: fix IPv6 mask in flow director
+* net/mlx5: check compressed CQE opcode in vectorized Rx
+* net/mlx5: fix build with GCC 12 and ASan
+* net/mlx5: fix CQE dump for Tx
+* net/mlx5: fix error CQE dumping for vectorized Rx
+* net/mlx5: fix flow sample with ConnectX-5
+* net/mlx5: fix hairpin Tx queue reference count
+* net/mlx5: fix sysfs port name translation
+* net/mlx5: ignore non-critical syndromes for Rx queue
+* net/nfp: fix firmware name derived from PCI name
+* net/nfp: fix getting RSS configuration
+* net/nfp: fix MTU configuration order
+* net/sfc: enforce fate action in transfer flow rules
+* net/txgbe: fix default signal quality value for KX/KX4
+* net/txgbe: fix interrupt loss
+* net/txgbe: fix packet type to parse from offload flags
+* net/txgbe: fix Rx buffer size in config register
+* net/vhost: add missing newline in logs
+* net/vhost: fix Rx interrupt
+* net/virtio: deduce IP length for TSO checksum
+* net/virtio: fix empty devargs parsing
+* net/virtio: remove address width limit for modern devices
+* net/virtio-user: fix device starting failure handling
+* pdump: fix build with GCC 12
+* raw/ifpga/base: fix init with multi-process
+* raw/skeleton: fix empty devargs parsing
+* raw/skeleton: fix selftest
+* reorder: fix sequence number mbuf field register
+* reorder: invalidate buffer from ready queue in drain
+* ring: silence GCC 12 warnings
+* telemetry: fix repeat display when callback don't init dict
+* test/bbdev: extend HARQ tolerance
+* test/bbdev: fix crash for non supported HARQ length
+* test/bbdev: remove check for invalid opaque data
+* test/crypto: add missing MAC-I to PDCP vectors
+* test/crypto: fix statistics error messages
+* test: fix segment length in packet generator
+* test/mbuf: fix mbuf reset test
+* test/mbuf: fix test with mbuf debug enabled
+* test/reorder: fix double free of drained buffers
+* vdpa/ifc: fix argument compatibility check
+* vdpa/ifc: fix reconnection in SW-assisted live migration
+* version: 20.11.8-rc1
+* vhost: decrease log level for unimplemented requests
+* vhost: fix doxygen warnings
+* vhost: fix net header settings in datapath
+* vhost: fix OOB access for invalid vhost ID
+* vhost: fix possible FD leaks
+* vhost: fix possible FD leaks on truncation
+
+20.11.8 Validation
+~~~~~~~~~~~~~~~~~~
+
+* Intel(R) Testing
+
+   * Basic Intel(R) NIC testing
+      * Build & CFLAG compile: cover the build test combinations with the latest GCC/Clang versions and popular OS revisions such as Ubuntu 20.04, Fedora 36, RHEL 8.4, etc.
+      * PF(i40e, ixgbe): test scenarios including RTE_FLOW/TSO/Jumboframe/checksum offload/VLAN/VXLAN, etc.
+      * VF(i40e, ixgbe): test scenarios including VF-RTE_FLOW/TSO/Jumboframe/checksum offload/VLAN/VXLAN, etc.
+      * PF/VF(ice): test scenarios including Switch features/Package Management/Flow Director/Advanced Tx/Advanced RSS/ACL/DCF/Flexible Descriptor, etc.
+      * Intel NIC single core/NIC performance: test scenarios including PF/VF single core performance test, etc.
+      * IPsec: test scenarios including ipsec/ipsec-gw/ipsec library basic test - QAT&SW/FIB library, etc.
+
+   * Basic cryptodev and virtio testing
+      * Virtio: both functional and performance tests are covered, such as PVP/Virtio_loopback/virtio-user loopback/virtio-net VM2VM perf testing/VMware ESXi 8.0, etc.
+      * Cryptodev:
+         * Function test: test scenarios including Cryptodev API testing/CompressDev ISA-L/QAT/ZLIB PMD Testing/FIPS, etc.
+         * Performance test: test scenarios including Throughput Performance/Cryptodev Latency, etc.
+
+
+* Nvidia(R) Testing
+
+   * Basic functionality via testpmd/example applications
+
+      * Tx/Rx
+      * xstats
+      * Timestamps
+      * Link status
+      * RTE flow and flow_director
+      * RSS
+      * VLAN filtering, stripping and insertion
+      * Checksum/TSO
+      * ptype
+      * link_status_interrupt example application
+      * l3fwd-power example application
+      * Multi-process example applications
+      * Hardware LRO tests
+
+   * Build tests
+
+      * Ubuntu 20.04.5 with MLNX_OFED_LINUX-5.9-0.5.6.0.
+      * Ubuntu 20.04.5 with rdma-core master (f0a079f).
+      * Ubuntu 20.04.5 with rdma-core v28.0.
+      * Ubuntu 18.04.6 with rdma-core v17.1.
+      * Ubuntu 18.04.6 with rdma-core master (f0a079f) (i386).
+      * Fedora 37 with rdma-core v41.0.
+      * Fedora 39 (Rawhide) with rdma-core v44.0.
+      * CentOS 7 7.9.2009 with rdma-core master (f0a079f).
+      * CentOS 7 7.9.2009 with MLNX_OFED_LINUX-5.9-0.5.6.0.
+      * CentOS 8 8.4.2105 with rdma-core master (f0a079f).
+      * OpenSUSE Leap 15.4 with rdma-core v38.1.
+      * Windows Server 2019 with Clang 11.0.0.
+
+   * BlueField-2
+
+      * DOCA 1.5.1
+      * fw 24.35.2000
+
+   * ConnectX-7
+
+      * Ubuntu 20.04
+      * Driver MLNX_OFED_LINUX-5.9-0.5.6.0
+      * fw 22.36.1010
+
+   * ConnectX-6 Dx
+
+      * Ubuntu 20.04
+      * Driver MLNX_OFED_LINUX-5.9-0.5.6.0
+      * fw 22.36.1010
+
+
+* Red Hat(R) Testing
+
+   * Platform
+
+      * RHEL 8
+      * Kernel 5.14
+      * Qemu 6.2.0
+      * X540-AT2 NIC (ixgbe, 10G)
+
+   * Functionality
+
+      * Guest with device assignment (PF) throughput testing (1G hugepage size)
+      * Guest with device assignment (PF) throughput testing (2M hugepage size)
+      * Guest with device assignment (VF) throughput testing
+      * PVP (host dpdk testpmd as vswitch) 1Q: throughput testing
+      * PVP vhost-user 2Q throughput testing
+      * PVP vhost-user 1Q cross numa node throughput testing
+      * Guest with vhost-user 2 queues throughput testing
+      * vhost-user reconnect with dpdk-client, qemu-server: qemu reconnect
+      * vhost-user reconnect with dpdk-client, qemu-server: ovs reconnect
+      * PVP reconnect with dpdk-client, qemu-server: PASS
+      * PVP 1Q live migration testing
+      * PVP 1Q cross numa node live migration testing
+      * Guest with ovs+dpdk+vhost-user 1Q live migration testing
+      * Guest with ovs+dpdk+vhost-user 1Q live migration testing (2M)
+      * Guest with ovs+dpdk+vhost-user 2Q live migration testing
+      * Guest with ovs+dpdk+vhost-user 4Q live migration testing
+      * Host PF + DPDK testing
+      * Host VF + DPDK testing
+
+20.11.8 Known Issues
+~~~~~~~~~~~~~~~~~~~~
+
+* softnic
+
+   * metering/policing: creating a rule can sometimes fail
+
+* testpmd
+   * an "OP_DEL_RSS_INPUT_CFG" error can sometimes appear when exiting
diff -Nru dpdk-20.11.7/doc/guides/sample_app_ug/l2_forward_cat.rst dpdk-20.11.8/doc/guides/sample_app_ug/l2_forward_cat.rst
--- dpdk-20.11.7/doc/guides/sample_app_ug/l2_forward_cat.rst	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/doc/guides/sample_app_ug/l2_forward_cat.rst	2023-04-27 18:57:22.000000000 +0100
@@ -50,13 +50,12 @@
     * https://github.com/01org/intel-cmt-cat
 
 
-#. To compile the application export the path to PQoS lib
-   and the DPDK source tree and go to the example directory:
+To compile the application, export the path to PQoS lib:
 
-   .. code-block:: console
-
-       export PQOS_INSTALL_PATH=/path/to/libpqos
+.. code-block:: console
 
+   export CFLAGS=-I/path/to/intel-cmt-cat/include
+   export LDFLAGS=-L/path/to/intel-cmt-cat/lib
 
 To compile the sample application see :doc:`compiling`.
 
diff -Nru dpdk-20.11.7/doc/guides/sample_app_ug/pipeline.rst dpdk-20.11.8/doc/guides/sample_app_ug/pipeline.rst
--- dpdk-20.11.7/doc/guides/sample_app_ug/pipeline.rst	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/doc/guides/sample_app_ug/pipeline.rst	2023-04-27 18:57:22.000000000 +0100
@@ -58,7 +58,7 @@
 
 .. code-block:: console
 
-    $ ./<build_dir>/examples/dpdk-pipeline -c 0x3 -- -s examples/vxlan.cli
+    $ ./<build_dir>/examples/dpdk-pipeline -c 0x3 -- -s examples/pipeline/examples/vxlan.cli
 
 The application should start successfully and display as follows:
 
diff -Nru dpdk-20.11.7/drivers/baseband/acc100/rte_acc100_pmd.c dpdk-20.11.8/drivers/baseband/acc100/rte_acc100_pmd.c
--- dpdk-20.11.7/drivers/baseband/acc100/rte_acc100_pmd.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/baseband/acc100/rte_acc100_pmd.c	2023-04-27 18:57:22.000000000 +0100
@@ -778,6 +778,7 @@
 		rte_free(d->tail_ptrs);
 		rte_free(d->info_ring);
 		rte_free(d->sw_rings_base);
+		rte_free(d->harq_layout);
 		d->sw_rings_base = NULL;
 		d->tail_ptrs = NULL;
 		d->info_ring = NULL;
@@ -3967,8 +3968,12 @@
 		/* CRC invalid if error exists */
 		if (!op->status)
 			op->status |= rsp.crc_status << RTE_BBDEV_CRC_ERROR;
-		op->turbo_dec.iter_count = RTE_MAX((uint8_t) rsp.iter_cnt,
-				op->turbo_dec.iter_count);
+		if (q->op_type == RTE_BBDEV_OP_LDPC_DEC)
+			op->ldpc_dec.iter_count = RTE_MAX((uint8_t) rsp.iter_cnt,
+					op->ldpc_dec.iter_count);
+		else
+			op->turbo_dec.iter_count = RTE_MAX((uint8_t) rsp.iter_cnt,
+					op->turbo_dec.iter_count);
 
 		/* Check if this is the last desc in batch (Atomic Queue) */
 		if (desc->req.last_desc_in_batch) {
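
Two independent fixes land in rte_acc100_pmd.c: the close path now frees the
harq_layout allocation alongside the other per-device arrays, plugging a leak
on device close, and the dequeue path updates the iteration counter of the
operation type actually being processed; the op structure keeps separate
ldpc_dec.iter_count and turbo_dec.iter_count fields, and the old code always
wrote the turbo one even on LDPC decode queues.
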
diff -Nru dpdk-20.11.7/drivers/baseband/turbo_sw/meson.build dpdk-20.11.8/drivers/baseband/turbo_sw/meson.build
--- dpdk-20.11.7/drivers/baseband/turbo_sw/meson.build	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/baseband/turbo_sw/meson.build	2023-04-27 18:57:22.000000000 +0100
@@ -6,15 +6,15 @@
 # check for FlexRAN SDK libraries for AVX2
 lib4g = cc.find_library('libturbo', dirs: [path + '/lib_turbo'], required: false)
 if lib4g.found()
-	ext_deps += cc.find_library('libturbo', dirs: [path + '/lib_turbo'], required: true)
-	ext_deps += cc.find_library('libcrc', dirs: [path + '/lib_crc'], required: true)
-	ext_deps += cc.find_library('librate_matching', dirs: [path + '/lib_rate_matching'], required: true)
-	ext_deps += cc.find_library('libcommon', dirs: [path + '/lib_common'], required: true)
-	ext_deps += cc.find_library('libstdc++', required: true)
-	ext_deps += cc.find_library('libirc', required: true)
-	ext_deps += cc.find_library('libimf', required: true)
-	ext_deps += cc.find_library('libipps', required: true)
-	ext_deps += cc.find_library('libsvml', required: true)
+	ext_deps += cc.find_library('turbo', dirs: [path + '/lib_turbo'], required: true)
+	ext_deps += cc.find_library('crc', dirs: [path + '/lib_crc'], required: true)
+	ext_deps += cc.find_library('rate_matching', dirs: [path + '/lib_rate_matching'], required: true)
+	ext_deps += cc.find_library('common', dirs: [path + '/lib_common'], required: true)
+	ext_deps += cc.find_library('stdc++', required: true)
+	ext_deps += cc.find_library('irc', required: true)
+	ext_deps += cc.find_library('imf', required: true)
+	ext_deps += cc.find_library('ipps', required: true)
+	ext_deps += cc.find_library('svml', required: true)
 	includes += include_directories(path + '/lib_turbo')
 	includes += include_directories(path + '/lib_crc')
 	includes += include_directories(path + '/lib_rate_matching')
@@ -25,10 +25,10 @@
 # check for FlexRAN SDK libraries for AVX512
 lib5g = cc.find_library('libldpc_decoder_5gnr', dirs: [path + '/lib_ldpc_decoder_5gnr'], required: false)
 if lib5g.found()
-	ext_deps += cc.find_library('libldpc_encoder_5gnr', dirs: [path + '/lib_ldpc_encoder_5gnr'], required: true)
-	ext_deps += cc.find_library('libldpc_decoder_5gnr', dirs: [path + '/lib_ldpc_decoder_5gnr'], required: true)
-	ext_deps += cc.find_library('libLDPC_ratematch_5gnr', dirs: [path + '/lib_LDPC_ratematch_5gnr'], required: true)
-	ext_deps += cc.find_library('librate_dematching_5gnr', dirs: [path + '/lib_rate_dematching_5gnr'], required: true)
+	ext_deps += cc.find_library('ldpc_encoder_5gnr', dirs: [path + '/lib_ldpc_encoder_5gnr'], required: true)
+	ext_deps += cc.find_library('ldpc_decoder_5gnr', dirs: [path + '/lib_ldpc_decoder_5gnr'], required: true)
+	ext_deps += cc.find_library('LDPC_ratematch_5gnr', dirs: [path + '/lib_LDPC_ratematch_5gnr'], required: true)
+	ext_deps += cc.find_library('rate_dematching_5gnr', dirs: [path + '/lib_rate_dematching_5gnr'], required: true)
 	includes += include_directories(path + '/lib_ldpc_encoder_5gnr')
 	includes += include_directories(path + '/lib_ldpc_decoder_5gnr')
 	includes += include_directories(path + '/lib_LDPC_ratematch_5gnr')
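
The renames above follow Meson's cc.find_library() convention: it takes the
bare library name and applies the platform prefix and suffix itself, so the
old 'lib'-prefixed arguments effectively asked for files like liblibturbo.a
and only resolved, if at all, through lookup fallbacks that are not reliable
across Meson versions.
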
diff -Nru dpdk-20.11.7/drivers/bus/ifpga/ifpga_bus.c dpdk-20.11.8/drivers/bus/ifpga/ifpga_bus.c
--- dpdk-20.11.7/drivers/bus/ifpga/ifpga_bus.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/bus/ifpga/ifpga_bus.c	2023-04-27 18:57:22.000000000 +0100
@@ -136,6 +136,8 @@
 			goto end;
 		}
 		afu_pr_conf.pr_enable = 1;
+		strlcpy(afu_pr_conf.bs_path, path,
+			sizeof(afu_pr_conf.bs_path));
 	} else {
 		afu_pr_conf.pr_enable = 0;
 	}
@@ -167,7 +169,6 @@
 		rawdev->dev_ops->dev_start(rawdev))
 		goto end;
 
-	strlcpy(afu_pr_conf.bs_path, path, sizeof(afu_pr_conf.bs_path));
 	if (rawdev->dev_ops &&
 		rawdev->dev_ops->firmware_load &&
 		rawdev->dev_ops->firmware_load(rawdev,
diff -Nru dpdk-20.11.7/drivers/common/mlx5/mlx5_common.h dpdk-20.11.8/drivers/common/mlx5/mlx5_common.h
--- dpdk-20.11.7/drivers/common/mlx5/mlx5_common.h	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/common/mlx5/mlx5_common.h	2023-04-27 18:57:22.000000000 +0100
@@ -199,7 +199,12 @@
 
 	if (unlikely((op_owner != (!!(idx))) || (op_code == MLX5_CQE_INVALID)))
 		return MLX5_CQE_STATUS_HW_OWN;
-	rte_io_rmb();
+
+	/* Prevent speculative reading of other fields in CQE until
+	 * CQE is valid.
+	 */
+	rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+
 	if (unlikely(op_code == MLX5_CQE_RESP_ERR ||
 		     op_code == MLX5_CQE_REQ_ERR))
 		return MLX5_CQE_STATUS_ERR;
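
The barrier swap follows the usual validity-then-payload pattern for
device-written descriptors: the ownership/opcode check must be ordered before
any other CQE field is read, and an acquire fence gives exactly that ordering
while being cheaper on Arm than the I/O read barrier it replaces. A minimal
sketch of the pattern (hypothetical descriptor layout, not the mlx5
structure):

    #include <stdint.h>
    #include <rte_atomic.h>

    struct desc {
            volatile uint8_t op_own; /* written last by the device */
            uint32_t payload;        /* valid only once op_own says so */
    };

    static inline int
    desc_read(const struct desc *d, uint32_t *out)
    {
            if ((d->op_own & 0x1) == 0)
                    return -1; /* still owned by hardware */
            /* Order the validity check before the payload read. */
            rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
            *out = d->payload;
            return 0;
    }
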
diff -Nru dpdk-20.11.7/drivers/compress/qat/qat_comp_pmd.c dpdk-20.11.8/drivers/compress/qat/qat_comp_pmd.c
--- dpdk-20.11.7/drivers/compress/qat/qat_comp_pmd.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/compress/qat/qat_comp_pmd.c	2023-04-27 18:57:22.000000000 +0100
@@ -30,7 +30,7 @@
 				RTE_COMP_FF_OOP_LB_IN_SGL_OUT |
 				RTE_COMP_FF_STATEFUL_DECOMPRESSION,
 	 .window_size = {.min = 15, .max = 15, .increment = 0} },
-	{RTE_COMP_ALGO_LIST_END, 0, {0, 0, 0} } };
+	 RTE_COMP_END_OF_CAPABILITIES_LIST() };
 
 static void
 qat_comp_stats_get(struct rte_compressdev *dev,
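
RTE_COMP_END_OF_CAPABILITIES_LIST() is the terminator macro rte_comp.h
provides for exactly this purpose; using it instead of a hand-rolled zeroed
entry keeps the sentinel in step with what the compressdev library checks
for when walking a capabilities array.
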
diff -Nru dpdk-20.11.7/drivers/crypto/ccp/ccp_dev.c dpdk-20.11.8/drivers/crypto/ccp/ccp_dev.c
--- dpdk-20.11.7/drivers/crypto/ccp/ccp_dev.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/crypto/ccp/ccp_dev.c	2023-04-27 18:57:22.000000000 +0100
@@ -361,7 +361,7 @@
 		if (ccp_get_bit(&cmd_q->lsbmask, j))
 			weight++;
 
-	printf("Queue %d can access %d LSB regions  of mask  %lu\n",
+	CCP_LOG_DBG("Queue %d can access %d LSB regions  of mask  %lu\n",
 	       (int)cmd_q->id, weight, cmd_q->lsbmask);
 
 	return weight ? 0 : -EINVAL;
@@ -797,7 +797,7 @@
 		snprintf(dirname, sizeof(dirname), "%s/%s",
 			     SYSFS_PCI_DEVICES, d->d_name);
 		if (is_ccp_device(dirname, ccp_id, &ccp_type)) {
-			printf("CCP : Detected CCP device with ID = 0x%x\n",
+			CCP_LOG_DBG("CCP : Detected CCP device with ID = 0x%x\n",
 			       ccp_id[ccp_type].device_id);
 			ret = ccp_probe_device(dirname, domain, bus, devid,
 					       function, ccp_type);
diff -Nru dpdk-20.11.7/drivers/crypto/ccp/ccp_pci.c dpdk-20.11.8/drivers/crypto/ccp/ccp_pci.c
--- dpdk-20.11.7/drivers/crypto/ccp/ccp_pci.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/crypto/ccp/ccp_pci.c	2023-04-27 18:57:22.000000000 +0100
@@ -11,6 +11,7 @@
 #include <rte_string_fns.h>
 
 #include "ccp_pci.h"
+#include "ccp_pmd_private.h"
 
 static const char * const uio_module_names[] = {
 	"igb_uio",
@@ -40,7 +41,7 @@
 		rewind(fp);
 	}
 	fclose(fp);
-	printf("Insert igb_uio or uio_pci_generic kernel module(s)");
+	CCP_LOG_DBG("Insert igb_uio or uio_pci_generic kernel module(s)");
 	return -1;/* uio not inserted */
 }
 
diff -Nru dpdk-20.11.7/drivers/crypto/ccp/rte_ccp_pmd.c dpdk-20.11.8/drivers/crypto/ccp/rte_ccp_pmd.c
--- dpdk-20.11.7/drivers/crypto/ccp/rte_ccp_pmd.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/crypto/ccp/rte_ccp_pmd.c	2023-04-27 18:57:22.000000000 +0100
@@ -342,7 +342,7 @@
 		goto init_error;
 	}
 
-	printf("CCP : Crypto device count = %d\n", cryptodev_cnt);
+	CCP_LOG_DBG("CCP : Crypto device count = %d\n", cryptodev_cnt);
 	dev->driver_id = ccp_cryptodev_driver_id;
 
 	/* register rx/tx burst functions for data path */
diff -Nru dpdk-20.11.7/drivers/crypto/qat/qat_sym_session.c dpdk-20.11.8/drivers/crypto/qat/qat_sym_session.c
--- dpdk-20.11.7/drivers/crypto/qat/qat_sym_session.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/crypto/qat/qat_sym_session.c	2023-04-27 18:57:22.000000000 +0100
@@ -1572,9 +1572,10 @@
 		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
 	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2
 		|| cdesc->qat_cipher_alg ==
-			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
 		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
-	else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
+		cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
+	} else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
 		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
 	else
 		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
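
Background for the direction fix: SNOW 3G UEA2 and ZUC EEA3 are stream
ciphers, so decryption applies the same keystream XOR as encryption, which
is presumably why the descriptor direction can be pinned to encrypt whenever
the key-convert path is taken. A generic illustration of that symmetry (not
QAT code):

    #include <stddef.h>
    #include <stdint.h>

    /* A stream cipher en/decrypts with one operation: out = in XOR
     * keystream, so applying it twice restores the original input. */
    static void
    stream_xor(uint8_t *out, const uint8_t *in,
               const uint8_t *keystream, size_t len)
    {
            size_t i;

            for (i = 0; i < len; i++)
                    out[i] = in[i] ^ keystream[i];
    }
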
diff -Nru dpdk-20.11.7/drivers/crypto/snow3g/rte_snow3g_pmd.c dpdk-20.11.8/drivers/crypto/snow3g/rte_snow3g_pmd.c
--- dpdk-20.11.7/drivers/crypto/snow3g/rte_snow3g_pmd.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/crypto/snow3g/rte_snow3g_pmd.c	2023-04-27 18:57:22.000000000 +0100
@@ -371,7 +371,8 @@
 process_op_bit(struct rte_crypto_op *op, struct snow3g_session *session,
 		struct snow3g_qp *qp, uint16_t *accumulated_enqueued_ops)
 {
-	unsigned enqueued_op, processed_op;
+	unsigned int processed_op;
+	int ret;
 
 	switch (session->op) {
 	case SNOW3G_OP_ONLY_CIPHER:
@@ -412,9 +413,13 @@
 
 	if (unlikely(processed_op != 1))
 		return 0;
-	enqueued_op = rte_ring_enqueue(qp->processed_ops, op);
-	qp->qp_stats.enqueued_count += enqueued_op;
-	*accumulated_enqueued_ops += enqueued_op;
+
+	ret = rte_ring_enqueue(qp->processed_ops, op);
+	if (ret != 0)
+		return ret;
+
+	qp->qp_stats.enqueued_count += 1;
+	*accumulated_enqueued_ops += 1;
 
 	return 1;
 }
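
For context on the accounting change: rte_ring_enqueue() returns 0 on
success and -ENOBUFS when the ring is full, so the old
"enqueued_count += enqueued_op" added zero for every successful enqueue and
a negative errno on failure. Counting an explicit +1 after a checked
enqueue, as the new code does, is the correct form.
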
diff -Nru dpdk-20.11.7/drivers/net/bnxt/bnxt_ethdev.c dpdk-20.11.8/drivers/net/bnxt/bnxt_ethdev.c
--- dpdk-20.11.7/drivers/net/bnxt/bnxt_ethdev.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/net/bnxt/bnxt_ethdev.c	2023-04-27 18:57:22.000000000 +0100
@@ -1030,7 +1030,6 @@
 		.tx_free_thresh = 32,
 		.tx_rs_thresh = 32,
 	};
-	eth_dev->data->dev_conf.intr_conf.lsc = 1;
 
 	dev_info->rx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
 	dev_info->rx_desc_lim.nb_max = BNXT_MAX_RX_RING_DESC;
@@ -5772,6 +5771,7 @@
 
 	rte_eth_copy_pci_info(eth_dev, pci_dev);
 	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
 
 	bp = eth_dev->data->dev_private;
 
diff -Nru dpdk-20.11.7/drivers/net/bnxt/bnxt_rxq.c dpdk-20.11.8/drivers/net/bnxt/bnxt_rxq.c
--- dpdk-20.11.7/drivers/net/bnxt/bnxt_rxq.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/net/bnxt/bnxt_rxq.c	2023-04-27 18:57:22.000000000 +0100
@@ -446,6 +446,12 @@
 		return -EINVAL;
 	}
 
+	/* reset the previous stats for the rx_queue since the counters
+	 * will be cleared when the queue is started.
+	 */
+	memset(&bp->prev_rx_ring_stats[rx_queue_id], 0,
+	       sizeof(struct bnxt_ring_stats));
+
 	/* Set the queue state to started here.
 	 * We check the status of the queue while posting buffer.
 	 * If queue is it started, we do not post buffers for Rx.
diff -Nru dpdk-20.11.7/drivers/net/bnxt/bnxt_rxr.c dpdk-20.11.8/drivers/net/bnxt/bnxt_rxr.c
--- dpdk-20.11.7/drivers/net/bnxt/bnxt_rxr.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/net/bnxt/bnxt_rxr.c	2023-04-27 18:57:22.000000000 +0100
@@ -749,7 +749,6 @@
 
 skip_mark:
 	mbuf->hash.fdir.hi = 0;
-	mbuf->hash.fdir.id = 0;
 
 	return 0;
 }
diff -Nru dpdk-20.11.7/drivers/net/bnxt/bnxt_txr.c dpdk-20.11.8/drivers/net/bnxt/bnxt_txr.c
--- dpdk-20.11.7/drivers/net/bnxt/bnxt_txr.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/net/bnxt/bnxt_txr.c	2023-04-27 18:57:22.000000000 +0100
@@ -535,6 +535,12 @@
 	if (rc)
 		return rc;
 
+	/* reset the previous stats for the tx_queue since the counters
+	 * will be cleared when the queue is started.
+	 */
+	memset(&bp->prev_tx_ring_stats[tx_queue_id], 0,
+	       sizeof(struct bnxt_ring_stats));
+
 	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
 	txq->tx_started = true;
 	PMD_DRV_LOG(DEBUG, "Tx queue started\n");
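
Two notes on the bnxt hunks above. First, advertising RTE_ETH_DEV_INTR_LSC
in dev_flags at probe time replaces the removed assignment that overwrote
dev_conf.intr_conf.lsc inside the info query, which silently enabled
link-state interrupts even for applications that never asked for them; the
flag leaves the decision with the application, roughly (sketch, with port_id
being the application's port index):

    #include <rte_ethdev.h>

    void
    request_lsc_if_supported(uint16_t port_id, struct rte_eth_conf *conf)
    {
            /* Request LSC interrupts only when the port advertises them. */
            if (rte_eth_devices[port_id].data->dev_flags &
                            RTE_ETH_DEV_INTR_LSC)
                    conf->intr_conf.lsc = 1;
    }

Second, the memsets in the Rx/Tx queue-start paths clear the cached previous
ring statistics: as the in-code comments note, the hardware counters are
cleared when a queue is started, so deltas computed against a stale snapshot
would be wrong after a stop/start cycle.
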
diff -Nru dpdk-20.11.7/drivers/net/e1000/em_rxtx.c dpdk-20.11.8/drivers/net/e1000/em_rxtx.c
--- dpdk-20.11.7/drivers/net/e1000/em_rxtx.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/net/e1000/em_rxtx.c	2023-04-27 18:57:22.000000000 +0100
@@ -1031,6 +1031,7 @@
 		 *    - RX port identifier,
 		 *    - hardware offload data, if any:
 		 *      - IP checksum flag,
+		 *      - VLAN TCI, if any,
 		 *      - error flags.
 		 */
 		first_seg->port = rxq->port_id;
@@ -1040,7 +1041,7 @@
 					rx_desc_error_to_pkt_flags(rxd.errors);
 
 		/* Only valid if PKT_RX_VLAN set in pkt_flags */
-		rxm->vlan_tci = rte_le_to_cpu_16(rxd.special);
+		first_seg->vlan_tci = rte_le_to_cpu_16(rxd.special);
 
 		/* Prefetch data of first segment, if configured to do so. */
 		rte_packet_prefetch((char *)first_seg->buf_addr +
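
The em_rxtx.c change matters for scattered Rx: in this path rxm points at
the segment that was just received (the tail of the chain), while
applications read per-packet metadata such as the VLAN TCI from the head
mbuf, so the TCI belongs in first_seg next to the other per-packet fields
the surrounding code already sets there.
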
diff -Nru dpdk-20.11.7/drivers/net/hns3/hns3_cmd.h dpdk-20.11.8/drivers/net/hns3/hns3_cmd.h
--- dpdk-20.11.7/drivers/net/hns3/hns3_cmd.h	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/net/hns3/hns3_cmd.h	2023-04-27 18:57:22.000000000 +0100
@@ -579,6 +579,7 @@
 #define HNS3_RSS_CFG_TBL_SIZE_H		4
 #define HNS3_RSS_CFG_TBL_BW_H		2
 #define HNS3_RSS_CFG_TBL_BW_L		8
+#define HNS3_RSS_CFG_TBL_BW_H_M		0x3
 
 /* Configure the indirection table, opcode:0x0D07 */
 struct hns3_rss_indirection_table_cmd {
diff -Nru dpdk-20.11.7/drivers/net/hns3/hns3_ethdev.c dpdk-20.11.8/drivers/net/hns3/hns3_ethdev.c
--- dpdk-20.11.7/drivers/net/hns3/hns3_ethdev.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/net/hns3/hns3_ethdev.c	2023-04-27 18:57:22.000000000 +0100
@@ -2606,7 +2606,7 @@
 	};
 
 	info->reta_size = hw->rss_ind_tbl_size;
-	info->hash_key_size = HNS3_RSS_KEY_SIZE;
+	info->hash_key_size = hw->rss_key_size;
 	info->flow_type_rss_offloads = HNS3_ETH_RSS_SUPPORT;
 
 	info->default_rxportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE;
@@ -3040,12 +3040,21 @@
 {
 	if (hw->rss_ind_tbl_size == 0 ||
 	    hw->rss_ind_tbl_size > HNS3_RSS_IND_TBL_SIZE_MAX) {
-		hns3_err(hw, "the size of hash lookup table configured (%u)"
-			      " exceeds the maximum(%u)", hw->rss_ind_tbl_size,
-			      HNS3_RSS_IND_TBL_SIZE_MAX);
+		hns3_err(hw, "the indirection table size obtained (%u) is invalid, and should not be zero or exceed the maximum(%u)",
+			 hw->rss_ind_tbl_size, HNS3_RSS_IND_TBL_SIZE_MAX);
 		return -EINVAL;
 	}
 
+	if (hw->rss_key_size == 0 || hw->rss_key_size > HNS3_RSS_KEY_SIZE_MAX) {
+		 hns3_err(hw, "the RSS key size obtained (%u) is invalid, and should not be zero or exceed the maximum(%u)",
+			   hw->rss_key_size, HNS3_RSS_KEY_SIZE_MAX);
+		 return -EINVAL;
+	}
+
+	if (hw->rss_key_size > HNS3_RSS_KEY_SIZE)
+		 hns3_warn(hw, "the RSS key size obtained (%u) is greater than the default key size (%u)",
+			    hw->rss_key_size, HNS3_RSS_KEY_SIZE);
+
 	return 0;
 }
 
@@ -5068,8 +5077,7 @@
 	rte_spinlock_unlock(&hw->lock);
 
 	hns3_rx_scattered_calc(dev);
-	hns3_set_rxtx_function(dev);
-	hns3_mp_req_start_rxtx(dev);
+	hns3_start_rxtx_datapath(dev);
 	rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, dev);
 
 	/* Enable interrupt of all rx queues before enabling queues */
@@ -5175,12 +5183,7 @@
 	dev->data->dev_started = 0;
 
 	hw->adapter_state = HNS3_NIC_STOPPING;
-	hns3_set_rxtx_function(dev);
-	rte_wmb();
-	/* Disable datapath on secondary process. */
-	hns3_mp_req_stop_rxtx(dev);
-	/* Prevent crashes when queues are still in use. */
-	rte_delay_ms(hw->cfg_max_queues);
+	hns3_stop_rxtx_datapath(dev);
 
 	rte_spinlock_lock(&hw->lock);
 	if (rte_atomic16_read(&hw->reset.resetting) == 0) {
@@ -5670,11 +5673,7 @@
 		rte_eal_alarm_cancel(hns3_service_handler, eth_dev);
 	hw->mac.link_status = ETH_LINK_DOWN;
 
-	hns3_set_rxtx_function(eth_dev);
-	rte_wmb();
-	/* Disable datapath on secondary process. */
-	hns3_mp_req_stop_rxtx(eth_dev);
-	rte_delay_ms(hw->cfg_max_queues);
+	hns3_stop_rxtx_datapath(eth_dev);
 
 	rte_spinlock_lock(&hw->lock);
 	if (hns->hw.adapter_state == HNS3_NIC_STARTED ||
@@ -5707,8 +5706,7 @@
 	    hw->reset.level == HNS3_GLOBAL_RESET)
 		hns3_set_rst_done(hw);
 	eth_dev = &rte_eth_devices[hw->data->port_id];
-	hns3_set_rxtx_function(eth_dev);
-	hns3_mp_req_start_rxtx(eth_dev);
+	hns3_start_rxtx_datapath(eth_dev);
 	if (hw->adapter_state == HNS3_NIC_STARTED) {
 		hns3_service_handler(eth_dev);
 
diff -Nru dpdk-20.11.7/drivers/net/hns3/hns3_ethdev.h dpdk-20.11.8/drivers/net/hns3/hns3_ethdev.h
--- dpdk-20.11.7/drivers/net/hns3/hns3_ethdev.h	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/net/hns3/hns3_ethdev.h	2023-04-27 18:57:22.000000000 +0100
@@ -896,15 +896,6 @@
 
 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
 
-#define NEXT_ITEM_OF_ACTION(act, actions, index)                        \
-	do {								\
-		(act) = (actions) + (index);				\
-		while ((act)->type == RTE_FLOW_ACTION_TYPE_VOID) {	\
-			(index)++;					\
-			(act) = (actions) + (index);				\
-		}							\
-	} while (0)
-
 #define MSEC_PER_SEC              1000L
 #define USEC_PER_MSEC             1000L
 
diff -Nru dpdk-20.11.7/drivers/net/hns3/hns3_ethdev_vf.c dpdk-20.11.8/drivers/net/hns3/hns3_ethdev_vf.c
--- dpdk-20.11.7/drivers/net/hns3/hns3_ethdev_vf.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/net/hns3/hns3_ethdev_vf.c	2023-04-27 18:57:22.000000000 +0100
@@ -1163,9 +1163,8 @@
 {
 	if (hw->rss_ind_tbl_size == 0 ||
 	    hw->rss_ind_tbl_size > HNS3_RSS_IND_TBL_SIZE_MAX) {
-		hns3_warn(hw, "the size of hash lookup table configured (%u)"
-			      " exceeds the maximum(%u)", hw->rss_ind_tbl_size,
-			      HNS3_RSS_IND_TBL_SIZE_MAX);
+		hns3_err(hw, "the indirection table size obtained (%u) is invalid, and should not be zero or exceed the maximum(%u)",
+			 hw->rss_ind_tbl_size, HNS3_RSS_IND_TBL_SIZE_MAX);
 		return -EINVAL;
 	}
 
@@ -1963,12 +1962,7 @@
 	dev->data->dev_started = 0;
 
 	hw->adapter_state = HNS3_NIC_STOPPING;
-	hns3_set_rxtx_function(dev);
-	rte_wmb();
-	/* Disable datapath on secondary process. */
-	hns3_mp_req_stop_rxtx(dev);
-	/* Prevent crashes when queues are still in use. */
-	rte_delay_ms(hw->cfg_max_queues);
+	hns3_stop_rxtx_datapath(dev);
 
 	rte_spinlock_lock(&hw->lock);
 	if (rte_atomic16_read(&hw->reset.resetting) == 0) {
@@ -2242,8 +2236,7 @@
 	rte_spinlock_unlock(&hw->lock);
 
 	hns3_rx_scattered_calc(dev);
-	hns3_set_rxtx_function(dev);
-	hns3_mp_req_start_rxtx(dev);
+	hns3_start_rxtx_datapath(dev);
 	hns3vf_service_handler(dev);
 
 	/* Enable interrupt of all rx queues before enabling queues */
@@ -2407,11 +2400,7 @@
 		rte_eal_alarm_cancel(hns3vf_service_handler, eth_dev);
 	hw->mac.link_status = ETH_LINK_DOWN;
 
-	hns3_set_rxtx_function(eth_dev);
-	rte_wmb();
-	/* Disable datapath on secondary process. */
-	hns3_mp_req_stop_rxtx(eth_dev);
-	rte_delay_ms(hw->cfg_max_queues);
+	hns3_stop_rxtx_datapath(eth_dev);
 
 	rte_spinlock_lock(&hw->lock);
 	if (hw->adapter_state == HNS3_NIC_STARTED ||
@@ -2441,8 +2430,8 @@
 	struct rte_eth_dev *eth_dev;
 
 	eth_dev = &rte_eth_devices[hw->data->port_id];
-	hns3_set_rxtx_function(eth_dev);
-	hns3_mp_req_start_rxtx(eth_dev);
+	hns3_start_rxtx_datapath(eth_dev);
+
 	if (hw->adapter_state == HNS3_NIC_STARTED) {
 		hns3vf_service_handler(eth_dev);
 
diff -Nru dpdk-20.11.7/drivers/net/hns3/hns3_fdir.h dpdk-20.11.8/drivers/net/hns3/hns3_fdir.h
--- dpdk-20.11.7/drivers/net/hns3/hns3_fdir.h	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/net/hns3/hns3_fdir.h	2023-04-27 18:57:22.000000000 +0100
@@ -175,10 +175,18 @@
 	struct hns3_fdir_rule fdir_conf;
 };
 
+struct hns3_flow_rss_conf {
+	struct rte_flow_action_rss conf;
+	uint8_t key[HNS3_RSS_KEY_SIZE_MAX];  /* Hash key */
+	uint16_t queue[HNS3_RSS_QUEUES_BUFFER_NUM]; /* Queues indices to use */
+	uint64_t pattern_type;
+	uint64_t hw_pctypes; /* packet types in driver */
+};
+
 /* rss filter list structure */
 struct hns3_rss_conf_ele {
 	TAILQ_ENTRY(hns3_rss_conf_ele) entries;
-	struct hns3_rss_conf filter_info;
+	struct hns3_flow_rss_conf filter_info;
 };
 
 /* hns3_flow memory list structure */
diff -Nru dpdk-20.11.7/drivers/net/hns3/hns3_flow.c dpdk-20.11.8/drivers/net/hns3/hns3_flow.c
--- dpdk-20.11.7/drivers/net/hns3/hns3_flow.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/net/hns3/hns3_flow.c	2023-04-27 18:57:22.000000000 +0100
@@ -5,10 +5,140 @@
 #include <rte_flow_driver.h>
 #include <rte_io.h>
 #include <rte_malloc.h>
+#include <rte_bitops.h>
 
 #include "hns3_ethdev.h"
 #include "hns3_logs.h"
 
+union hns3_filter_conf {
+	struct hns3_fdir_rule fdir_conf;
+	struct hns3_flow_rss_conf rss_conf;
+};
+
+struct hns3_filter_info {
+	enum rte_filter_type type;
+	union hns3_filter_conf conf;
+};
+
+#define NEXT_ITEM_OF_ACTION(act, actions, index) \
+	do { \
+		(act) = (actions) + (index); \
+		while ((act)->type == RTE_FLOW_ACTION_TYPE_VOID) { \
+			(index)++; \
+			(act) = (actions) + (index); \
+		} \
+	} while (0)
+
+#define NEXT_ITEM_OF_PATTERN(item, pattern, index) \
+	do { \
+		(item) = (pattern) + (index); \
+		while ((item)->type == RTE_FLOW_ITEM_TYPE_VOID) { \
+			(index)++; \
+			(item) = (pattern) + (index); \
+		} \
+	} while (0)
+
+#define HNS3_HASH_HDR_ETH	RTE_BIT64(0)
+#define HNS3_HASH_HDR_IPV4	RTE_BIT64(1)
+#define HNS3_HASH_HDR_IPV6	RTE_BIT64(2)
+#define HNS3_HASH_HDR_TCP	RTE_BIT64(3)
+#define HNS3_HASH_HDR_UDP	RTE_BIT64(4)
+#define HNS3_HASH_HDR_SCTP	RTE_BIT64(5)
+
+#define HNS3_HASH_VOID_NEXT_ALLOW	BIT_ULL(RTE_FLOW_ITEM_TYPE_ETH)
+
+#define HNS3_HASH_ETH_NEXT_ALLOW	(BIT_ULL(RTE_FLOW_ITEM_TYPE_IPV4) | \
+					 BIT_ULL(RTE_FLOW_ITEM_TYPE_IPV6))
+
+#define HNS3_HASH_IP_NEXT_ALLOW		(BIT_ULL(RTE_FLOW_ITEM_TYPE_TCP) | \
+					 BIT_ULL(RTE_FLOW_ITEM_TYPE_UDP) | \
+					 BIT_ULL(RTE_FLOW_ITEM_TYPE_SCTP))
+
+static const uint64_t hash_pattern_next_allow_items[] = {
+	[RTE_FLOW_ITEM_TYPE_VOID] = HNS3_HASH_VOID_NEXT_ALLOW,
+	[RTE_FLOW_ITEM_TYPE_ETH]  = HNS3_HASH_ETH_NEXT_ALLOW,
+	[RTE_FLOW_ITEM_TYPE_IPV4] = HNS3_HASH_IP_NEXT_ALLOW,
+	[RTE_FLOW_ITEM_TYPE_IPV6] = HNS3_HASH_IP_NEXT_ALLOW,
+};
+
+static const uint64_t hash_pattern_item_header[] = {
+	[RTE_FLOW_ITEM_TYPE_ETH]  = HNS3_HASH_HDR_ETH,
+	[RTE_FLOW_ITEM_TYPE_IPV4] = HNS3_HASH_HDR_IPV4,
+	[RTE_FLOW_ITEM_TYPE_IPV6] = HNS3_HASH_HDR_IPV6,
+	[RTE_FLOW_ITEM_TYPE_TCP]  = HNS3_HASH_HDR_TCP,
+	[RTE_FLOW_ITEM_TYPE_UDP]  = HNS3_HASH_HDR_UDP,
+	[RTE_FLOW_ITEM_TYPE_SCTP] = HNS3_HASH_HDR_SCTP,
+};
+
+#define HNS3_HASH_IPV4		(HNS3_HASH_HDR_ETH | HNS3_HASH_HDR_IPV4)
+#define HNS3_HASH_IPV4_TCP	(HNS3_HASH_HDR_ETH | \
+				 HNS3_HASH_HDR_IPV4 | \
+				 HNS3_HASH_HDR_TCP)
+#define HNS3_HASH_IPV4_UDP	(HNS3_HASH_HDR_ETH | \
+				 HNS3_HASH_HDR_IPV4 | \
+				 HNS3_HASH_HDR_UDP)
+#define HNS3_HASH_IPV4_SCTP	(HNS3_HASH_HDR_ETH | \
+				 HNS3_HASH_HDR_IPV4 | \
+				 HNS3_HASH_HDR_SCTP)
+#define HNS3_HASH_IPV6		(HNS3_HASH_HDR_ETH | HNS3_HASH_HDR_IPV6)
+#define HNS3_HASH_IPV6_TCP	(HNS3_HASH_HDR_ETH | \
+				 HNS3_HASH_HDR_IPV6 | \
+				 HNS3_HASH_HDR_TCP)
+#define HNS3_HASH_IPV6_UDP	(HNS3_HASH_HDR_ETH | \
+				 HNS3_HASH_HDR_IPV6 | \
+				 HNS3_HASH_HDR_UDP)
+#define HNS3_HASH_IPV6_SCTP	(HNS3_HASH_HDR_ETH | \
+				 HNS3_HASH_HDR_IPV6 | \
+				 HNS3_HASH_HDR_SCTP)
+
+static const struct hns3_hash_map_info {
+	/* flow type specified, zero means action works for all flow types. */
+	uint64_t pattern_type;
+	uint64_t rss_pctype; /* packet type with prefix RTE_ETH_RSS_xxx */
+	uint64_t l3l4_types; /* Supported L3/L4 RSS types for this packet type */
+	uint64_t hw_pctype; /* packet type in driver */
+	uint64_t tuple_mask; /* full tuples of the hw_pctype */
+} hash_map_table[] = {
+	/* IPV4 */
+	{ HNS3_HASH_IPV4,
+	  ETH_RSS_IPV4, HNS3_RSS_SUPPORT_L3_SRC_DST,
+	  HNS3_RSS_PCTYPE_IPV4_NONF, HNS3_RSS_TUPLE_IPV4_NONF_M },
+	{ HNS3_HASH_IPV4,
+	  ETH_RSS_NONFRAG_IPV4_OTHER, HNS3_RSS_SUPPORT_L3_SRC_DST,
+	  HNS3_RSS_PCTYPE_IPV4_NONF, HNS3_RSS_TUPLE_IPV4_NONF_M },
+	{ HNS3_HASH_IPV4,
+	  ETH_RSS_FRAG_IPV4, HNS3_RSS_SUPPORT_L3_SRC_DST,
+	  HNS3_RSS_PCTYPE_IPV4_FLAG, HNS3_RSS_TUPLE_IPV4_FLAG_M },
+	{ HNS3_HASH_IPV4_TCP,
+	  ETH_RSS_NONFRAG_IPV4_TCP, HNS3_RSS_SUPPORT_L3L4,
+	  HNS3_RSS_PCTYPE_IPV4_TCP, HNS3_RSS_TUPLE_IPV4_TCP_M },
+	{ HNS3_HASH_IPV4_UDP,
+	  ETH_RSS_NONFRAG_IPV4_UDP, HNS3_RSS_SUPPORT_L3L4,
+	  HNS3_RSS_PCTYPE_IPV4_UDP, HNS3_RSS_TUPLE_IPV4_UDP_M },
+	{ HNS3_HASH_IPV4_SCTP,
+	  ETH_RSS_NONFRAG_IPV4_SCTP, HNS3_RSS_SUPPORT_L3L4,
+	  HNS3_RSS_PCTYPE_IPV4_SCTP, HNS3_RSS_TUPLE_IPV4_SCTP_M },
+	/* IPV6 */
+	{ HNS3_HASH_IPV6,
+	  ETH_RSS_IPV6, HNS3_RSS_SUPPORT_L3_SRC_DST,
+	  HNS3_RSS_PCTYPE_IPV6_NONF, HNS3_RSS_TUPLE_IPV6_NONF_M },
+	{ HNS3_HASH_IPV6,
+	  ETH_RSS_NONFRAG_IPV6_OTHER, HNS3_RSS_SUPPORT_L3_SRC_DST,
+	  HNS3_RSS_PCTYPE_IPV6_NONF, HNS3_RSS_TUPLE_IPV6_NONF_M },
+	{ HNS3_HASH_IPV6,
+	  ETH_RSS_FRAG_IPV6, HNS3_RSS_SUPPORT_L3_SRC_DST,
+	  HNS3_RSS_PCTYPE_IPV6_FLAG, HNS3_RSS_TUPLE_IPV6_FLAG_M },
+	{ HNS3_HASH_IPV6_TCP,
+	  ETH_RSS_NONFRAG_IPV6_TCP, HNS3_RSS_SUPPORT_L3L4,
+	  HNS3_RSS_PCTYPE_IPV6_TCP, HNS3_RSS_TUPLE_IPV6_TCP_M },
+	{ HNS3_HASH_IPV6_UDP,
+	  ETH_RSS_NONFRAG_IPV6_UDP, HNS3_RSS_SUPPORT_L3L4,
+	  HNS3_RSS_PCTYPE_IPV6_UDP, HNS3_RSS_TUPLE_IPV6_UDP_M },
+	{ HNS3_HASH_IPV6_SCTP,
+	  ETH_RSS_NONFRAG_IPV6_SCTP, HNS3_RSS_SUPPORT_L3L4,
+	  HNS3_RSS_PCTYPE_IPV6_SCTP, HNS3_RSS_TUPLE_IPV6_SCTP_M },
+};
+
 static const uint8_t full_mask[VNI_OR_TNI_LEN] = { 0xFF, 0xFF, 0xFF };
 static const uint8_t zero_mask[VNI_OR_TNI_LEN] = { 0x00, 0x00, 0x00 };
 
@@ -78,7 +208,7 @@
 }
 
 /*
- * This function is used to find rss general action.
+ * This function is used to parse filter type.
  * 1. As we know RSS is used to spread packets among several queues, the flow
  *    API provide the struct rte_flow_action_rss, user could config its field
  *    sush as: func/level/types/key/queue to control RSS function.
@@ -86,16 +216,18 @@
  *    implemented by FDIR + RSS in hns3 hardware, user can create one FDIR rule
  *    which action is RSS queues region.
  * 3. When action is RSS, we use the following rule to distinguish:
- *    Case 1: pattern have ETH and action's queue_num > 0, indicate it is queue
- *            region configuration.
+ *    Case 1: pattern has ETH and all fields in RSS action except 'queues' are
+ *            zero or default, indicate it is queue region configuration.
  *    Case other: an rss general action.
  */
-static const struct rte_flow_action *
-hns3_find_rss_general_action(const struct rte_flow_item pattern[],
-			     const struct rte_flow_action actions[])
+static void
+hns3_parse_filter_type(const struct rte_flow_item pattern[],
+		       const struct rte_flow_action actions[],
+		       struct hns3_filter_info *filter_info)
 {
+	const struct rte_flow_action_rss *rss_act;
 	const struct rte_flow_action *act = NULL;
-	const struct hns3_rss_conf *rss;
+	bool only_has_queues = false;
 	bool have_eth = false;
 
 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
@@ -104,8 +236,10 @@
 			break;
 		}
 	}
-	if (!act)
-		return NULL;
+	if (act == NULL) {
+		filter_info->type = RTE_ETH_FILTER_FDIR;
+		return;
+	}
 
 	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
 		if (pattern->type == RTE_FLOW_ITEM_TYPE_ETH) {
@@ -114,19 +248,21 @@
 		}
 	}
 
-	rss = act->conf;
-	if (have_eth && rss->conf.queue_num) {
+	rss_act = act->conf;
+	only_has_queues = (rss_act->queue_num > 0) &&
+			  (rss_act->func == RTE_ETH_HASH_FUNCTION_DEFAULT &&
+			   rss_act->types == 0 && rss_act->key_len == 0);
+	if (have_eth && only_has_queues) {
 		/*
-		 * Pattern have ETH and action's queue_num > 0, indicate this is
-		 * queue region configuration.
-		 * Because queue region is implemented by FDIR + RSS in hns3
-		 * hardware, it needs to enter FDIR process, so here return NULL
-		 * to avoid enter RSS process.
+		 * Pattern has ETH and all fields in RSS action except 'queues'
+		 * are zero or default, which indicates this is queue region
+		 * configuration.
 		 */
-		return NULL;
+		filter_info->type = RTE_ETH_FILTER_FDIR;
+		return;
 	}
 
-	return act;
+	filter_info->type = RTE_ETH_FILTER_HASH;
 }
 
 static inline struct hns3_flow_counter *
@@ -1225,7 +1361,6 @@
 {
 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct hns3_fdir_rule_ele *fdir_rule_ptr;
-	struct hns3_rss_conf_ele *rss_filter_ptr;
 	struct hns3_flow_mem *flow_node;
 
 	fdir_rule_ptr = TAILQ_FIRST(&hw->flow_fdir_list);
@@ -1235,13 +1370,6 @@
 		fdir_rule_ptr = TAILQ_FIRST(&hw->flow_fdir_list);
 	}
 
-	rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list);
-	while (rss_filter_ptr) {
-		TAILQ_REMOVE(&hw->flow_rss_list, rss_filter_ptr, entries);
-		rte_free(rss_filter_ptr);
-		rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list);
-	}
-
 	flow_node = TAILQ_FIRST(&hw->flow_list);
 	while (flow_node) {
 		TAILQ_REMOVE(&hw->flow_list, flow_node, entries);
@@ -1252,227 +1380,478 @@
 }
 
 static bool
+hns3_flow_rule_key_same(const struct rte_flow_action_rss *comp,
+			const struct rte_flow_action_rss *with)
+{
+	if (comp->key_len != with->key_len)
+		return false;
+
+	if (with->key_len == 0)
+		return true;
+
+	if (comp->key == NULL && with->key == NULL)
+		return true;
+
+	if (!(comp->key != NULL && with->key != NULL))
+		return false;
+
+	return !memcmp(comp->key, with->key, with->key_len);
+}
+
+static bool
+hns3_flow_rule_queues_same(const struct rte_flow_action_rss *comp,
+			   const struct rte_flow_action_rss *with)
+{
+	if (comp->queue_num != with->queue_num)
+		return false;
+
+	if (with->queue_num == 0)
+		return true;
+
+	if (comp->queue == NULL && with->queue == NULL)
+		return true;
+
+	if (!(comp->queue != NULL && with->queue != NULL))
+		return false;
+
+	return !memcmp(comp->queue, with->queue, with->queue_num);
+}
+
+static bool
 hns3_action_rss_same(const struct rte_flow_action_rss *comp,
 		     const struct rte_flow_action_rss *with)
 {
-	bool rss_key_is_same;
-	bool func_is_same;
+	bool same_level;
+	bool same_types;
+	bool same_func;
+
+	same_level = (comp->level == with->level);
+	same_types = (comp->types == with->types);
+	same_func = (comp->func == with->func);
+
+	return same_level && same_types && same_func &&
+		hns3_flow_rule_key_same(comp, with) &&
+		hns3_flow_rule_queues_same(comp, with);
+}
 
+static bool
+hns3_valid_ipv6_sctp_rss_types(struct hns3_hw *hw, uint64_t types)
+{
 	/*
-	 * When user flush all RSS rule, RSS func is set invalid with
-	 * RTE_ETH_HASH_FUNCTION_MAX. Then the user create a flow after
-	 * flushed, any validate RSS func is different with it before
-	 * flushed. Others, when user create an action RSS with RSS func
-	 * specified RTE_ETH_HASH_FUNCTION_DEFAULT, the func is the same
-	 * between continuous RSS flow.
+	 * Some hardware don't support to use src/dst port fields to hash
+	 * for IPV6 SCTP packet type.
 	 */
-	if (comp->func == RTE_ETH_HASH_FUNCTION_MAX)
-		func_is_same = false;
-	else
-		func_is_same = (with->func != RTE_ETH_HASH_FUNCTION_DEFAULT) ?
-				(comp->func == with->func) : true;
+	if (types & ETH_RSS_NONFRAG_IPV6_SCTP &&
+	    types & HNS3_RSS_SUPPORT_L4_SRC_DST &&
+	    !hw->rss_info.ipv6_sctp_offload_supported)
+		return false;
 
-	if (with->key_len == 0 || with->key == NULL)
-		rss_key_is_same = 1;
-	else
-		rss_key_is_same = comp->key_len == with->key_len &&
-		!memcmp(comp->key, with->key, with->key_len);
+	return true;
+}
 
-	return (func_is_same && rss_key_is_same &&
-		comp->types == (with->types & HNS3_ETH_RSS_SUPPORT) &&
-		comp->level == with->level &&
-		comp->queue_num == with->queue_num &&
-		!memcmp(comp->queue, with->queue,
-			sizeof(*with->queue) * with->queue_num));
+static int
+hns3_flow_parse_hash_func(const struct rte_flow_action_rss *rss_act,
+			  struct hns3_flow_rss_conf *rss_conf,
+			  struct rte_flow_error *error)
+{
+	if (rss_act->func >= RTE_ETH_HASH_FUNCTION_MAX)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+					  NULL, "RSS hash func are not supported");
+
+	rss_conf->conf.func = rss_act->func;
+	return 0;
 }
 
 static int
-hns3_rss_conf_copy(struct hns3_rss_conf *out,
-		   const struct rte_flow_action_rss *in)
+hns3_flow_parse_hash_key(struct hns3_hw *hw,
+			 const struct rte_flow_action_rss *rss_act,
+			 struct hns3_flow_rss_conf *rss_conf,
+			 struct rte_flow_error *error)
 {
-	if (in->key_len > RTE_DIM(out->key) ||
-	    in->queue_num > RTE_DIM(out->queue))
-		return -EINVAL;
-	if (in->key == NULL && in->key_len)
-		return -EINVAL;
-	out->conf = (struct rte_flow_action_rss) {
-		.func = in->func,
-		.level = in->level,
-		.types = in->types,
-		.key_len = in->key_len,
-		.queue_num = in->queue_num,
-	};
-	out->conf.queue = memcpy(out->queue, in->queue,
-				sizeof(*in->queue) * in->queue_num);
-	if (in->key)
-		out->conf.key = memcpy(out->key, in->key, in->key_len);
+	if (rss_act->key_len != hw->rss_key_size)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+					  NULL, "invalid RSS key length");
+
+	if (rss_act->key != NULL)
+		memcpy(rss_conf->key, rss_act->key, rss_act->key_len);
+	else
+		memcpy(rss_conf->key, hns3_hash_key,
+			RTE_MIN(sizeof(hns3_hash_key), rss_act->key_len));
+	/* Need to record if user sets hash key. */
+	rss_conf->conf.key = rss_act->key;
+	rss_conf->conf.key_len = rss_act->key_len;
 
 	return 0;
 }
 
-/*
- * This function is used to parse rss action validation.
- */
 static int
-hns3_parse_rss_filter(struct rte_eth_dev *dev,
-		      const struct rte_flow_action *actions,
-		      struct rte_flow_error *error)
+hns3_flow_parse_queues(struct hns3_hw *hw,
+		       const struct rte_flow_action_rss *rss_act,
+		       struct hns3_flow_rss_conf *rss_conf,
+		       struct rte_flow_error *error)
 {
-	struct hns3_adapter *hns = dev->data->dev_private;
-	struct hns3_hw *hw = &hns->hw;
-	struct hns3_rss_conf *rss_conf = &hw->rss_info;
-	const struct rte_flow_action_rss *rss;
-	const struct rte_flow_action *act;
-	uint32_t act_index = 0;
-	uint16_t n;
+	uint16_t i;
 
-	NEXT_ITEM_OF_ACTION(act, actions, act_index);
-	rss = act->conf;
+	if (rss_act->queue_num > hw->rss_ind_tbl_size)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+					  NULL,
+					  "queue number can not exceed RSS indirection table.");
 
-	if (rss == NULL) {
-		return rte_flow_error_set(error, EINVAL,
+	if (rss_act->queue_num > HNS3_RSS_QUEUES_BUFFER_NUM)
+		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
-					  act, "no valid queues");
+					  NULL,
+					  "queue number configured exceeds queue buffer size driver supported");
+
+	for (i = 0; i < rss_act->queue_num; i++) {
+		if (rss_act->queue[i] >= hw->alloc_rss_size)
+			return rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+						NULL,
+						"queue id must be less than queue number allocated to a TC");
 	}
 
-	if (rss->queue_num > RTE_DIM(rss_conf->queue))
-		return rte_flow_error_set(error, ENOTSUP,
-					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
-					  "queue number configured exceeds "
-					  "queue buffer size driver supported");
+	memcpy(rss_conf->queue, rss_act->queue,
+	       rss_act->queue_num * sizeof(rss_conf->queue[0]));
+	rss_conf->conf.queue = rss_conf->queue;
+	rss_conf->conf.queue_num = rss_act->queue_num;
 
-	for (n = 0; n < rss->queue_num; n++) {
-		if (rss->queue[n] < hw->alloc_rss_size)
-			continue;
-		return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
-					  "queue id must be less than queue number allocated to a TC");
+	return 0;
+}
+
+static int
+hns3_flow_get_hw_pctype(struct hns3_hw *hw,
+			const struct rte_flow_action_rss *rss_act,
+			const struct hns3_hash_map_info *map,
+			struct hns3_flow_rss_conf *rss_conf,
+			struct rte_flow_error *error)
+{
+	uint64_t l3l4_src_dst, l3l4_refine, left_types;
+
+	if (rss_act->types == 0) {
+		/* Disable RSS hash of this packet type if types is zero. */
+		rss_conf->hw_pctypes |= map->hw_pctype;
+		return 0;
 	}
 
-	if (!(rss->types & HNS3_ETH_RSS_SUPPORT) && rss->types)
+	/*
+	 * Can not have extra types except rss_pctype and l3l4_type in this map.
+	 */
+	left_types = ~map->rss_pctype & rss_act->types;
+	if (left_types & ~map->l3l4_types)
 		return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
-					  act,
-					  "Flow types is unsupported by "
-					  "hns3's RSS");
-	if (rss->func >= RTE_ETH_HASH_FUNCTION_MAX)
-		return rte_flow_error_set(error, ENOTSUP,
-					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
-					  "RSS hash func are not supported");
-	if (rss->level)
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+					  "cannot set extra types.");
+
+	l3l4_src_dst = left_types;
+	/* L3/L4 SRC and DST shouldn't be specified at the same time. */
+	l3l4_refine = rte_eth_rss_hf_refine(l3l4_src_dst);
+	if (l3l4_refine != l3l4_src_dst)
 		return rte_flow_error_set(error, ENOTSUP,
-					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
-					  "a nonzero RSS encapsulation level is not supported");
-	if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+					  "cannot specify L3_SRC/DST_ONLY or L4_SRC/DST_ONLY at the same.");
+
+	if (!hns3_valid_ipv6_sctp_rss_types(hw, rss_act->types))
 		return rte_flow_error_set(error, ENOTSUP,
-					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
-					  "RSS hash key must be exactly 40 bytes");
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+					  "hardware doesn't support to use L4 src/dst to hash for IPV6-SCTP.");
 
-	/*
-	 * For Kunpeng920 and Kunpeng930 NIC hardware, it is not supported to
-	 * use dst port/src port fields to RSS hash for the following packet
-	 * types.
-	 * - IPV4 FRAG | IPV4 NONFRAG | IPV6 FRAG | IPV6 NONFRAG
-	 * Besides, for Kunpeng920, The NIC hardware is not supported to use
-	 * src/dst port fields to RSS hash for IPV6 SCTP packet type.
-	 */
-	if (rss->types & (ETH_RSS_L4_DST_ONLY | ETH_RSS_L4_SRC_ONLY) &&
-	   (rss->types & ETH_RSS_IP ||
-	   (!hw->rss_info.ipv6_sctp_offload_supported &&
-	   rss->types & ETH_RSS_NONFRAG_IPV6_SCTP)))
-		return rte_flow_error_set(error, EINVAL,
+	rss_conf->hw_pctypes |= map->hw_pctype;
+
+	return 0;
+}
+
+static int
+hns3_flow_parse_rss_types_by_ptype(struct hns3_hw *hw,
+				   const struct rte_flow_action_rss *rss_act,
+				   uint64_t pattern_type,
+				   struct hns3_flow_rss_conf *rss_conf,
+				   struct rte_flow_error *error)
+{
+	const struct hns3_hash_map_info *map;
+	bool matched = false;
+	uint16_t i;
+	int ret;
+
+	for (i = 0; i < RTE_DIM(hash_map_table); i++) {
+		map = &hash_map_table[i];
+		if (map->pattern_type != pattern_type) {
+			/*
+			 * If the target pattern type is already matched with
+			 * the one before this pattern in the hash map table,
+			 * no need to continue walk.
+			 */
+			if (matched)
+				break;
+			continue;
+		}
+		matched = true;
+
+		/*
+		 * If pattern type is matched and the 'types' is zero, all packet flow
+		 * types related to this pattern type disable RSS hash.
+		 * Otherwise, RSS types must match the pattern type and cannot have no
+		 * extra or unsupported types.
+		 */
+		if (rss_act->types != 0 && !(map->rss_pctype & rss_act->types))
+			continue;
+
+		ret = hns3_flow_get_hw_pctype(hw, rss_act, map, rss_conf, error);
+		if (ret != 0)
+			return ret;
+	}
+
+	if (rss_conf->hw_pctypes != 0)
+		return 0;
+
+	if (matched)
+		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
-					  &rss->types,
-					  "input RSS types are not supported");
+					  NULL, "RSS types are unsupported");
 
-	act_index++;
+	return rte_flow_error_set(error, ENOTSUP,
+				  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+				  NULL, "Pattern specified is unsupported");
+}
 
-	/* Check if the next not void action is END */
-	NEXT_ITEM_OF_ACTION(act, actions, act_index);
-	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
-		memset(rss_conf, 0, sizeof(struct hns3_rss_conf));
-		return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_ACTION,
-					  act, "Not supported action.");
+static uint64_t
+hns3_flow_get_all_hw_pctypes(uint64_t types)
+{
+	uint64_t hw_pctypes = 0;
+	uint16_t i;
+
+	for (i = 0; i < RTE_DIM(hash_map_table); i++) {
+		if (types & hash_map_table[i].rss_pctype)
+			hw_pctypes |= hash_map_table[i].hw_pctype;
 	}
 
-	return 0;
+	return hw_pctypes;
 }
 
 static int
-hns3_disable_rss(struct hns3_hw *hw)
+hns3_flow_parse_rss_types(struct hns3_hw *hw,
+			  const struct rte_flow_action_rss *rss_act,
+			  uint64_t pattern_type,
+			  struct hns3_flow_rss_conf *rss_conf,
+			  struct rte_flow_error *error)
 {
+	rss_conf->conf.types = rss_act->types;
+
+	/* No pattern is specified, so configure the global RSS types. */
+	if (pattern_type == 0) {
+		if (!hns3_check_rss_types_valid(hw, rss_act->types))
+			return rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+					NULL, "RSS types are invalid.");
+		rss_conf->hw_pctypes =
+				hns3_flow_get_all_hw_pctypes(rss_act->types);
+		return 0;
+	}
+
+	return hns3_flow_parse_rss_types_by_ptype(hw, rss_act, pattern_type,
+						  rss_conf, error);
+}
+
+static int
+hns3_flow_parse_hash_global_conf(struct rte_eth_dev *dev,
+				 const struct rte_flow_action_rss *rss_act,
+				 struct hns3_flow_rss_conf *rss_conf,
+				 struct rte_flow_error *error)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	int ret;
 
-	ret = hns3_set_rss_tuple_by_rss_hf(hw, 0);
-	if (ret)
+	ret = hns3_flow_parse_hash_func(rss_act, rss_conf, error);
+	if (ret != 0)
 		return ret;
 
-	return 0;
+	if (rss_act->queue_num > 0) {
+		ret = hns3_flow_parse_queues(hw, rss_act, rss_conf, error);
+		if (ret != 0)
+			return ret;
+	}
+
+	if (rss_act->key_len > 0) {
+		ret = hns3_flow_parse_hash_key(hw, rss_act, rss_conf, error);
+		if (ret != 0)
+			return ret;
+	}
+
+	return hns3_flow_parse_rss_types(hw, rss_act, rss_conf->pattern_type,
+					 rss_conf, error);
 }
 
-static void
-hns3_parse_rss_key(struct hns3_hw *hw, struct rte_flow_action_rss *rss_conf)
+static int
+hns3_flow_parse_pattern_type(const struct rte_flow_item pattern[],
+			     uint64_t *ptype, struct rte_flow_error *error)
 {
-	if (rss_conf->key == NULL || rss_conf->key_len < HNS3_RSS_KEY_SIZE) {
-		hns3_warn(hw, "Default RSS hash key to be set");
-		rss_conf->key = hns3_hash_key;
-		rss_conf->key_len = HNS3_RSS_KEY_SIZE;
+	enum rte_flow_item_type pre_type = RTE_FLOW_ITEM_TYPE_VOID;
+	const char *message = "Pattern specified isn't supported";
+	uint64_t item_hdr, pattern_hdrs = 0;
+	enum rte_flow_item_type cur_type;
+
+	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
+		if (pattern->type == RTE_FLOW_ITEM_TYPE_VOID)
+			continue;
+		if (pattern->mask || pattern->spec || pattern->last) {
+			message = "Header info shouldn't be specified";
+			goto unsup;
+		}
+
+		/* Check whether this item is allowed to follow the previous item. */
+		if (pre_type >= RTE_DIM(hash_pattern_next_allow_items) ||
+		    !(hash_pattern_next_allow_items[pre_type] &
+				BIT_ULL(pattern->type)))
+			goto unsup;
+
+		cur_type = pattern->type;
+		/* A type beyond the item-header table size is unsupported. */
+		if (cur_type >= RTE_DIM(hash_pattern_item_header))
+			goto unsup;
+
+		/* A zero value means the current header is unsupported. */
+		item_hdr = hash_pattern_item_header[cur_type];
+		if (item_hdr == 0)
+			goto unsup;
+
+		/* Reject duplicate pattern headers. */
+		if (item_hdr & pattern_hdrs)
+			goto unsup;
+		pre_type = cur_type;
+		pattern_hdrs |= item_hdr;
 	}
+
+	if (pattern_hdrs != 0) {
+		*ptype = pattern_hdrs;
+		return 0;
+	}
+
+unsup:
+	return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+				  pattern, message);
 }
 
 static int
-hns3_parse_rss_algorithm(struct hns3_hw *hw, enum rte_eth_hash_function *func,
-			 uint8_t *hash_algo)
+hns3_flow_parse_pattern_act(struct rte_eth_dev *dev,
+			    const struct rte_flow_item pattern[],
+			    const struct rte_flow_action_rss *rss_act,
+			    struct hns3_flow_rss_conf *rss_conf,
+			    struct rte_flow_error *error)
 {
-	enum rte_eth_hash_function algo_func = *func;
-	switch (algo_func) {
-	case RTE_ETH_HASH_FUNCTION_DEFAULT:
-		/* Keep *hash_algo as what it used to be */
-		algo_func = hw->rss_info.conf.func;
-		break;
-	case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
-		*hash_algo = HNS3_RSS_HASH_ALGO_TOEPLITZ;
-		break;
-	case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR:
-		*hash_algo = HNS3_RSS_HASH_ALGO_SIMPLE;
-		break;
-	case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ:
-		*hash_algo = HNS3_RSS_HASH_ALGO_SYMMETRIC_TOEP;
-		break;
-	default:
-		hns3_err(hw, "Invalid RSS algorithm configuration(%d)",
-			 algo_func);
-		return -EINVAL;
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	int ret;
+
+	ret = hns3_flow_parse_hash_func(rss_act, rss_conf, error);
+	if (ret != 0)
+		return ret;
+
+	if (rss_act->key_len > 0) {
+		ret = hns3_flow_parse_hash_key(hw, rss_act, rss_conf, error);
+		if (ret != 0)
+			return ret;
 	}
-	*func = algo_func;
+
+	if (rss_act->queue_num > 0) {
+		ret = hns3_flow_parse_queues(hw, rss_act, rss_conf, error);
+		if (ret != 0)
+			return ret;
+	}
+
+	ret = hns3_flow_parse_pattern_type(pattern, &rss_conf->pattern_type,
+					   error);
+	if (ret != 0)
+		return ret;
+
+	ret = hns3_flow_parse_rss_types(hw, rss_act, rss_conf->pattern_type,
+					rss_conf, error);
+	if (ret != 0)
+		return ret;
+
+	if (rss_act->func != RTE_ETH_HASH_FUNCTION_DEFAULT ||
+	    rss_act->key_len > 0 || rss_act->queue_num > 0)
+		hns3_warn(hw, "hash func, key and queues are global config that works for all flow types. "
+			  "Recommend: don't set them together with a pattern.");
 
 	return 0;
 }
 
+static bool
+hns3_rss_action_is_dup(struct hns3_hw *hw,
+		       const struct hns3_flow_rss_conf *conf)
+{
+	struct hns3_rss_conf_ele *filter;
+
+	TAILQ_FOREACH(filter, &hw->flow_rss_list, entries) {
+		if (conf->pattern_type != filter->filter_info.pattern_type)
+			continue;
+
+		if (hns3_action_rss_same(&filter->filter_info.conf, &conf->conf))
+			return true;
+	}
+
+	return false;
+}
+
+/*
+ * Parse and validate the RSS action.
+ */
 static int
-hns3_hw_rss_hash_set(struct hns3_hw *hw, struct rte_flow_action_rss *rss_config)
+hns3_parse_rss_filter(struct rte_eth_dev *dev,
+		      const struct rte_flow_item pattern[],
+		      const struct rte_flow_action *actions,
+		      struct hns3_flow_rss_conf *rss_conf,
+		      struct rte_flow_error *error)
 {
+	struct hns3_adapter *hns = dev->data->dev_private;
+	const struct rte_flow_action_rss *rss_act;
+	const struct rte_flow_action *act;
+	const struct rte_flow_item *pat;
+	struct hns3_hw *hw = &hns->hw;
+	uint32_t index = 0;
 	int ret;
 
-	hns3_parse_rss_key(hw, rss_config);
+	NEXT_ITEM_OF_ACTION(act, actions, index);
+	if (actions[1].type != RTE_FLOW_ACTION_TYPE_END)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION,
+					  &actions[1],
+					  "Only one action is supported for RSS.");
 
-	ret = hns3_parse_rss_algorithm(hw, &rss_config->func,
-				       &hw->rss_info.hash_algo);
-	if (ret)
-		return ret;
+	rss_act = (const struct rte_flow_action_rss *)act->conf;
+	if (rss_act == NULL) {
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+					  act, "missing RSS action configuration");
+	}
 
-	ret = hns3_set_rss_algo_key(hw, rss_config->key);
-	if (ret)
-		return ret;
+	if (rss_act->level != 0)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+					  act,
+					  "RSS level is not supported");
 
-	hw->rss_info.conf.func = rss_config->func;
+	index = 0;
+	NEXT_ITEM_OF_PATTERN(pat, pattern, index);
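+	/*
+	 * An empty pattern configures the global RSS settings; otherwise the
+	 * pattern selects the packet types whose hash tuples are updated.
+	 */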
+	if (pat[0].type == RTE_FLOW_ITEM_TYPE_END) {
+		rss_conf->pattern_type = 0;
+		ret = hns3_flow_parse_hash_global_conf(dev, rss_act,
+						       rss_conf, error);
+	} else {
+		ret = hns3_flow_parse_pattern_act(dev, pat, rss_act,
+						  rss_conf, error);
+	}
+	if (ret != 0)
+		return ret;
 
-	ret = hns3_set_rss_tuple_by_rss_hf(hw, rss_config->types);
-	if (ret)
-		hns3_err(hw, "Update RSS tuples by rss hf failed %d", ret);
+	if (hns3_rss_action_is_dup(hw, rss_conf))
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+					  act, "duplicate RSS rule");
 
-	return ret;
+	return 0;
 }
 
 static int
@@ -1484,8 +1863,6 @@
 	uint32_t i;
 
 	/* Fill in redirection table */
-	memcpy(indir_tbl, hw->rss_info.rss_indirection_tbl,
-	       sizeof(hw->rss_info.rss_indirection_tbl));
 	for (i = 0, j = 0; i < hw->rss_ind_tbl_size; i++, j++) {
 		j %= num;
 		if (conf->queue[j] >= hw->alloc_rss_size) {
@@ -1500,82 +1877,105 @@
 	return hns3_set_rss_indir_table(hw, indir_tbl, hw->rss_ind_tbl_size);
 }
 
+static uint64_t
+hns3_flow_get_pctype_tuple_mask(uint64_t hw_pctype)
+{
+	uint64_t tuple_mask = 0;
+	uint16_t i;
+
+	for (i = 0; i < RTE_DIM(hash_map_table); i++) {
+		if (hw_pctype == hash_map_table[i].hw_pctype) {
+			tuple_mask = hash_map_table[i].tuple_mask;
+			break;
+		}
+	}
+
+	return tuple_mask;
+}
+
+static int
+hns3_flow_set_rss_ptype_tuple(struct hns3_hw *hw,
+			      struct hns3_flow_rss_conf *rss_conf)
+{
+	uint64_t old_tuple_fields = 0, new_tuple_fields;
+	uint64_t hw_pctypes, tuples, tuple_mask = 0;
+	bool cfg_global_tuple;
+	int ret;
+
+	cfg_global_tuple = (rss_conf->pattern_type == 0);
+	if (!cfg_global_tuple) {
+		/*
+		 * To ensure that different packet types do not affect each
+		 * other, we first read all tuple fields and then modify only
+		 * the tuples of the specified packet types.
+		 */
+		ret = hns3_get_rss_tuple_field(hw, &old_tuple_fields);
+		if (ret != 0)
+			return ret;
+
+		new_tuple_fields = old_tuple_fields;
+		hw_pctypes = rss_conf->hw_pctypes;
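+		/*
+		 * Walk the set bits of hw_pctypes one at a time: rte_bsf64()
+		 * returns the index of the lowest set bit, that pctype's tuple
+		 * mask is replaced with the tuples derived from the requested
+		 * types, and the bit is then cleared.
+		 */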
+		while (hw_pctypes > 0) {
+			uint32_t idx = rte_bsf64(hw_pctypes);
+			uint64_t pctype = BIT_ULL(idx);
+
+			tuple_mask = hns3_flow_get_pctype_tuple_mask(pctype);
+			tuples = hns3_rss_calc_tuple_filed(rss_conf->conf.types);
+			new_tuple_fields &= ~tuple_mask;
+			new_tuple_fields |= tuples;
+			hw_pctypes &= ~pctype;
+		}
+	} else {
+		new_tuple_fields =
+			hns3_rss_calc_tuple_filed(rss_conf->conf.types);
+	}
+
+	ret = hns3_set_rss_tuple_field(hw, new_tuple_fields);
+	if (ret != 0)
+		return ret;
+
+	hns3_info(hw, "RSS tuple fields changed from 0x%" PRIx64 " to 0x%" PRIx64,
+		  old_tuple_fields, new_tuple_fields);
+
+	return 0;
+}
+
 static int
 hns3_config_rss_filter(struct hns3_hw *hw,
-		       const struct hns3_rss_conf *conf, bool add)
+		       struct hns3_flow_rss_conf *rss_conf)
 {
-	struct hns3_rss_conf *rss_info;
-	uint64_t flow_types;
-	uint16_t num;
-	int ret;
-
-	struct rte_flow_action_rss rss_flow_conf = {
-		.func = conf->conf.func,
-		.level = conf->conf.level,
-		.types = conf->conf.types,
-		.key_len = conf->conf.key_len,
-		.queue_num = conf->conf.queue_num,
-		.key = conf->conf.key_len ?
-		    (void *)(uintptr_t)conf->conf.key : NULL,
-		.queue = conf->conf.queue,
-	};
-
-	/* Filter the unsupported flow types */
-	flow_types = conf->conf.types ?
-		     rss_flow_conf.types & HNS3_ETH_RSS_SUPPORT :
-		     hw->rss_info.conf.types;
-	if (flow_types != rss_flow_conf.types)
-		hns3_warn(hw, "modified RSS types based on hardware support, "
-			      "requested:0x%" PRIx64 " configured:0x%" PRIx64,
-			  rss_flow_conf.types, flow_types);
-	/* Update the useful flow types */
-	rss_flow_conf.types = flow_types;
-
-	rss_info = &hw->rss_info;
-	if (!add) {
-		if (!conf->valid)
-			return 0;
+	struct rte_flow_action_rss *rss_act;
+	int ret;
 
-		ret = hns3_disable_rss(hw);
+	rss_act = &rss_conf->conf;
+	if (rss_act->queue_num > 0) {
+		ret = hns3_update_indir_table(hw, rss_act, rss_act->queue_num);
 		if (ret) {
-			hns3_err(hw, "RSS disable failed(%d)", ret);
+			hns3_err(hw, "set queues action failed, ret = %d", ret);
 			return ret;
 		}
+	}
 
-		if (rss_flow_conf.queue_num) {
-			/*
-			 * Due the content of queue pointer have been reset to
-			 * 0, the rss_info->conf.queue should be set to NULL
-			 */
-			rss_info->conf.queue = NULL;
-			rss_info->conf.queue_num = 0;
+	if (rss_act->key_len > 0 ||
+	    rss_act->func != RTE_ETH_HASH_FUNCTION_DEFAULT) {
+		ret = hns3_update_rss_algo_key(hw, rss_act->func, rss_conf->key,
+					       rss_act->key_len);
+		if (ret != 0) {
+			hns3_err(hw, "set func or hash key action failed, ret = %d",
+				 ret);
+			return ret;
 		}
-
-		return 0;
 	}
 
-	/* Set rx queues to use */
-	num = RTE_MIN(hw->data->nb_rx_queues, rss_flow_conf.queue_num);
-	if (rss_flow_conf.queue_num > num)
-		hns3_warn(hw, "Config queue numbers %u are beyond the scope of truncated",
-			  rss_flow_conf.queue_num);
-	hns3_info(hw, "Max of contiguous %u PF queues are configured", num);
-	if (num) {
-		ret = hns3_update_indir_table(hw, &rss_flow_conf, num);
-		if (ret)
+	if (rss_conf->hw_pctypes > 0) {
+		ret = hns3_flow_set_rss_ptype_tuple(hw, rss_conf);
+		if (ret != 0) {
+			hns3_err(hw, "set types action failed, ret = %d", ret);
 			return ret;
+		}
 	}
 
-	/* Set hash algorithm and flow types by the user's config */
-	ret = hns3_hw_rss_hash_set(hw, &rss_flow_conf);
-	if (ret)
-		return ret;
-
-	ret = hns3_rss_conf_copy(rss_info, &rss_flow_conf);
-	if (ret)
-		hns3_err(hw, "RSS config init fail(%d)", ret);
-
-	return ret;
+	return 0;
 }
 
 static int
@@ -1584,52 +1984,44 @@
 	struct hns3_adapter *hns = dev->data->dev_private;
 	struct hns3_rss_conf_ele *rss_filter_ptr;
 	struct hns3_hw *hw = &hns->hw;
-	int rss_rule_succ_cnt = 0; /* count for success of clearing RSS rules */
-	int rss_rule_fail_cnt = 0; /* count for failure of clearing RSS rules */
-	int ret = 0;
 
 	rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list);
 	while (rss_filter_ptr) {
 		TAILQ_REMOVE(&hw->flow_rss_list, rss_filter_ptr, entries);
-		ret = hns3_config_rss_filter(hw, &rss_filter_ptr->filter_info,
-					     false);
-		if (ret)
-			rss_rule_fail_cnt++;
-		else
-			rss_rule_succ_cnt++;
 		rte_free(rss_filter_ptr);
 		rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list);
 	}
 
-	if (rss_rule_fail_cnt) {
-		hns3_err(hw, "fail to delete all RSS filters, success num = %d "
-			     "fail num = %d", rss_rule_succ_cnt,
-			     rss_rule_fail_cnt);
-		ret = -EIO;
-	}
-
-	return ret;
+	return hns3_config_rss(hns);
 }
 
 static int
-hns3_restore_rss_filter(struct hns3_hw *hw)
+hns3_reconfig_all_rss_filter(struct hns3_hw *hw)
 {
 	struct hns3_rss_conf_ele *filter;
-	int ret = 0;
+	uint32_t rule_no = 0;
+	int ret;
 
-	pthread_mutex_lock(&hw->flows_lock);
 	TAILQ_FOREACH(filter, &hw->flow_rss_list, entries) {
-		if (!filter->filter_info.valid)
-			continue;
-
-		ret = hns3_config_rss_filter(hw, &filter->filter_info, true);
+		ret = hns3_config_rss_filter(hw, &filter->filter_info);
 		if (ret != 0) {
-			hns3_err(hw, "restore RSS filter failed, ret=%d", ret);
-			goto out;
+			hns3_err(hw, "config RSS filter %u failed, ret = %d",
+				 rule_no, ret);
+			return ret;
 		}
+		rule_no++;
 	}
 
-out:
+	return 0;
+}
+
+static int
+hns3_restore_rss_filter(struct hns3_hw *hw)
+{
+	int ret;
+
+	pthread_mutex_lock(&hw->flows_lock);
+	ret = hns3_reconfig_all_rss_filter(hw);
 	pthread_mutex_unlock(&hw->flows_lock);
 
 	return ret;
@@ -1649,23 +2041,6 @@
 }
 
 static int
-hns3_flow_parse_rss(struct rte_eth_dev *dev,
-		    const struct hns3_rss_conf *conf, bool add)
-{
-	struct hns3_adapter *hns = dev->data->dev_private;
-	struct hns3_hw *hw = &hns->hw;
-	bool ret;
-
-	ret = hns3_action_rss_same(&hw->rss_info.conf, &conf->conf);
-	if (ret) {
-		hns3_err(hw, "Enter duplicate RSS configuration : %d", ret);
-		return -EINVAL;
-	}
-
-	return hns3_config_rss_filter(hw, conf, add);
-}
-
-static int
 hns3_flow_args_check(const struct rte_flow_attr *attr,
 		     const struct rte_flow_item pattern[],
 		     const struct rte_flow_action actions[],
@@ -1698,31 +2073,55 @@
 hns3_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 		   const struct rte_flow_item pattern[],
 		   const struct rte_flow_action actions[],
-		   struct rte_flow_error *error)
+		   struct rte_flow_error *error,
+		   struct hns3_filter_info *filter_info)
 {
-	struct hns3_fdir_rule fdir_rule;
+	union hns3_filter_conf *conf;
 	int ret;
 
 	ret = hns3_flow_args_check(attr, pattern, actions, error);
 	if (ret)
 		return ret;
 
-	if (hns3_find_rss_general_action(pattern, actions))
-		return hns3_parse_rss_filter(dev, actions, error);
+	hns3_parse_filter_type(pattern, actions, filter_info);
+	conf = &filter_info->conf;
+	if (filter_info->type == RTE_ETH_FILTER_HASH)
+		return hns3_parse_rss_filter(dev, pattern, actions,
+					     &conf->rss_conf, error);
 
-	memset(&fdir_rule, 0, sizeof(struct hns3_fdir_rule));
-	return hns3_parse_fdir_filter(dev, pattern, actions, &fdir_rule, error);
+	return hns3_parse_fdir_filter(dev, pattern, actions,
+				      &conf->fdir_conf, error);
+}
+
+static int
+hns3_flow_rebuild_all_rss_filter(struct hns3_adapter *hns)
+{
+	struct hns3_hw *hw = &hns->hw;
+	int ret;
+
+	ret = hns3_config_rss(hns);
+	if (ret != 0) {
+		hns3_err(hw, "restore original RSS configuration failed, ret = %d.",
+			 ret);
+		return ret;
+	}
+	ret = hns3_reconfig_all_rss_filter(hw);
+	if (ret != 0)
+		hns3_err(hw, "rebuild all RSS filter failed, ret = %d.", ret);
+
+	return ret;
 }
 
 static int
 hns3_flow_create_rss_rule(struct rte_eth_dev *dev,
-			  const struct rte_flow_action *act,
+			  struct hns3_flow_rss_conf *rss_conf,
 			  struct rte_flow *flow)
 {
 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
 	struct hns3_rss_conf_ele *rss_filter_ptr;
-	struct hns3_rss_conf_ele *filter_ptr;
-	const struct hns3_rss_conf *rss_conf;
+	struct hns3_flow_rss_conf *new_conf;
+	struct rte_flow_action_rss *rss_act;
 	int ret;
 
 	rss_filter_ptr = rte_zmalloc("hns3 rss filter",
@@ -1732,28 +2131,29 @@
 		return -ENOMEM;
 	}
 
+	new_conf = &rss_filter_ptr->filter_info;
+	memcpy(new_conf, rss_conf, sizeof(*new_conf));
+	rss_act = &new_conf->conf;
+	if (rss_act->queue_num > 0)
+		new_conf->conf.queue = new_conf->queue;
 	/*
-	 * After all the preceding tasks are successfully configured, configure
-	 * rules to the hardware to simplify the rollback of rules in the
-	 * hardware.
+	 * There are two ways to deliver hash key action:
+	 * 1> 'key_len' is greater than zero and 'key' isn't NULL.
+	 * 2> 'key_len' is greater than zero, but 'key' is NULL.
+	 * For case 2, keep the 'key' of new_conf NULL so that the user's
+	 * configuration is inherited if the duplicate-rule check fails
+	 * later.
 	 */
-	rss_conf = (const struct hns3_rss_conf *)act->conf;
-	ret = hns3_flow_parse_rss(dev, rss_conf, true);
+	if (rss_act->key_len > 0 && rss_act->key != NULL)
+		new_conf->conf.key = new_conf->key;
+
+	ret = hns3_config_rss_filter(hw, new_conf);
 	if (ret != 0) {
 		rte_free(rss_filter_ptr);
+		(void)hns3_flow_rebuild_all_rss_filter(hns);
 		return ret;
 	}
 
-	hns3_rss_conf_copy(&rss_filter_ptr->filter_info, &rss_conf->conf);
-	rss_filter_ptr->filter_info.valid = true;
-
-	/*
-	 * When create a new RSS rule, the old rule will be overlaid and set
-	 * invalid.
-	 */
-	TAILQ_FOREACH(filter_ptr, &hw->flow_rss_list, entries)
-		filter_ptr->filter_info.valid = false;
-
 	TAILQ_INSERT_TAIL(&hw->flow_rss_list, rss_filter_ptr, entries);
 	flow->rule = rss_filter_ptr;
 	flow->filter_type = RTE_ETH_FILTER_HASH;
@@ -1763,29 +2163,22 @@
 
 static int
 hns3_flow_create_fdir_rule(struct rte_eth_dev *dev,
-			   const struct rte_flow_item pattern[],
-			   const struct rte_flow_action actions[],
+			   struct hns3_fdir_rule *fdir_rule,
 			   struct rte_flow_error *error,
 			   struct rte_flow *flow)
 {
 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
 	struct hns3_fdir_rule_ele *fdir_rule_ptr;
-	struct hns3_fdir_rule fdir_rule;
 	int ret;
 
-	memset(&fdir_rule, 0, sizeof(struct hns3_fdir_rule));
-	ret = hns3_parse_fdir_filter(dev, pattern, actions, &fdir_rule, error);
-	if (ret != 0)
-		return ret;
-
-	if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER) {
-		ret = hns3_counter_new(dev, fdir_rule.act_cnt.shared,
-				       fdir_rule.act_cnt.id, error);
+	if (fdir_rule->flags & HNS3_RULE_FLAG_COUNTER) {
+		ret = hns3_counter_new(dev, fdir_rule->act_cnt.shared,
+				       fdir_rule->act_cnt.id, error);
 		if (ret != 0)
 			return ret;
 
-		flow->counter_id = fdir_rule.act_cnt.id;
+		flow->counter_id = fdir_rule->act_cnt.id;
 	}
 
 	fdir_rule_ptr = rte_zmalloc("hns3 fdir rule",
@@ -1801,11 +2194,11 @@
 	 * rules to the hardware to simplify the rollback of rules in the
 	 * hardware.
 	 */
-	ret = hns3_fdir_filter_program(hns, &fdir_rule, false);
+	ret = hns3_fdir_filter_program(hns, fdir_rule, false);
 	if (ret != 0)
 		goto err_fdir_filter;
 
-	memcpy(&fdir_rule_ptr->fdir_conf, &fdir_rule,
+	memcpy(&fdir_rule_ptr->fdir_conf, fdir_rule,
 		sizeof(struct hns3_fdir_rule));
 	TAILQ_INSERT_TAIL(&hw->flow_fdir_list, fdir_rule_ptr, entries);
 	flow->rule = fdir_rule_ptr;
@@ -1816,8 +2209,8 @@
 err_fdir_filter:
 	rte_free(fdir_rule_ptr);
 err_malloc:
-	if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER)
-		hns3_counter_release(dev, fdir_rule.act_cnt.id);
+	if (fdir_rule->flags & HNS3_RULE_FLAG_COUNTER)
+		hns3_counter_release(dev, fdir_rule->act_cnt.id);
 
 	return ret;
 }
@@ -1835,13 +2228,15 @@
 		 struct rte_flow_error *error)
 {
 	struct hns3_adapter *hns = dev->data->dev_private;
-	struct hns3_hw *hw = &hns->hw;
+	struct hns3_filter_info filter_info = {0};
 	struct hns3_flow_mem *flow_node;
-	const struct rte_flow_action *act;
+	struct hns3_hw *hw = &hns->hw;
+	union hns3_filter_conf *conf;
 	struct rte_flow *flow;
 	int ret;
 
-	ret = hns3_flow_validate(dev, attr, pattern, actions, error);
+	ret = hns3_flow_validate(dev, attr, pattern, actions, error,
+				 &filter_info);
 	if (ret)
 		return NULL;
 
@@ -1861,13 +2256,12 @@
 	}
 
 	flow_node->flow = flow;
+	conf = &filter_info.conf;
 	TAILQ_INSERT_TAIL(&hw->flow_list, flow_node, entries);
-
-	act = hns3_find_rss_general_action(pattern, actions);
-	if (act)
-		ret = hns3_flow_create_rss_rule(dev, act, flow);
+	if (filter_info.type == RTE_ETH_FILTER_HASH)
+		ret = hns3_flow_create_rss_rule(dev, &conf->rss_conf, flow);
 	else
-		ret = hns3_flow_create_fdir_rule(dev, pattern, actions,
+		ret = hns3_flow_create_fdir_rule(dev, &conf->fdir_conf,
 						 error, flow);
 	if (ret == 0)
 		return flow;
@@ -1920,16 +2314,10 @@
 		break;
 	case RTE_ETH_FILTER_HASH:
 		rss_filter_ptr = (struct hns3_rss_conf_ele *)flow->rule;
-		ret = hns3_config_rss_filter(hw, &rss_filter_ptr->filter_info,
-					     false);
-		if (ret)
-			return rte_flow_error_set(error, EIO,
-						  RTE_FLOW_ERROR_TYPE_HANDLE,
-						  flow,
-						  "Destroy RSS fail.Try again");
 		TAILQ_REMOVE(&hw->flow_rss_list, rss_filter_ptr, entries);
 		rte_free(rss_filter_ptr);
 		rss_filter_ptr = NULL;
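+		/*
+		 * RSS hardware state is shared by all rules, so instead of
+		 * undoing this rule in isolation, restore the default RSS
+		 * configuration and replay the remaining rules.
+		 */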
+		(void)hns3_flow_rebuild_all_rss_filter(hns);
 		break;
 	default:
 		return rte_flow_error_set(error, EINVAL,
@@ -2036,10 +2424,12 @@
 			struct rte_flow_error *error)
 {
 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct hns3_filter_info filter_info = {0};
 	int ret;
 
 	pthread_mutex_lock(&hw->flows_lock);
-	ret = hns3_flow_validate(dev, attr, pattern, actions, error);
+	ret = hns3_flow_validate(dev, attr, pattern, actions, error,
+				 &filter_info);
 	pthread_mutex_unlock(&hw->flows_lock);
 
 	return ret;
diff -Nru dpdk-20.11.7/drivers/net/hns3/hns3_mp.c dpdk-20.11.8/drivers/net/hns3/hns3_mp.c
--- dpdk-20.11.7/drivers/net/hns3/hns3_mp.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/net/hns3/hns3_mp.c	2023-04-27 18:57:22.000000000 +0100
@@ -87,7 +87,7 @@
 	case HNS3_MP_REQ_START_RXTX:
 		PMD_INIT_LOG(INFO, "port %u starting datapath",
 			     dev->data->port_id);
-		hns3_set_rxtx_function(dev);
+		hns3_start_rxtx_datapath(dev);
 		rte_mb();
 		mp_init_msg(dev, &mp_res, param->type);
 		res->result = 0;
@@ -96,7 +96,7 @@
 	case HNS3_MP_REQ_STOP_RXTX:
 		PMD_INIT_LOG(INFO, "port %u stopping datapath",
 			     dev->data->port_id);
-		hns3_set_rxtx_function(dev);
+		hns3_stop_rxtx_datapath(dev);
 		rte_mb();
 		mp_init_msg(dev, &mp_res, param->type);
 		res->result = 0;
diff -Nru dpdk-20.11.7/drivers/net/hns3/hns3_rss.c dpdk-20.11.8/drivers/net/hns3/hns3_rss.c
--- dpdk-20.11.7/drivers/net/hns3/hns3_rss.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/net/hns3/hns3_rss.c	2023-04-27 18:57:22.000000000 +0100
@@ -18,56 +18,11 @@
 	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
 };
 
-enum hns3_tuple_field {
-	/* IPV4_TCP ENABLE FIELD */
-	HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_D = 0,
-	HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_S,
-	HNS3_RSS_FIELD_IPV4_TCP_EN_IP_D,
-	HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S,
-
-	/* IPV4_UDP ENABLE FIELD */
-	HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_D = 8,
-	HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_S,
-	HNS3_RSS_FIELD_IPV4_UDP_EN_IP_D,
-	HNS3_RSS_FIELD_IPV4_UDP_EN_IP_S,
-
-	/* IPV4_SCTP ENABLE FIELD */
-	HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_D = 16,
-	HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_S,
-	HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_D,
-	HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_S,
-	HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_VER,
-
-	/* IPV4 ENABLE FIELD */
-	HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D = 24,
-	HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_S,
-	HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_D,
-	HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_S,
-
-	/* IPV6_TCP ENABLE FIELD */
-	HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_D = 32,
-	HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_S,
-	HNS3_RSS_FIELD_IPV6_TCP_EN_IP_D,
-	HNS3_RSS_FIELD_IPV6_TCP_EN_IP_S,
-
-	/* IPV6_UDP ENABLE FIELD */
-	HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_D = 40,
-	HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_S,
-	HNS3_RSS_FIELD_IPV6_UDP_EN_IP_D,
-	HNS3_RSS_FIELD_IPV6_UDP_EN_IP_S,
-
-	/* IPV6_SCTP ENABLE FIELD */
-	HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_D = 48,
-	HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_S,
-	HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_D,
-	HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_S,
-	HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_VER,
-
-	/* IPV6 ENABLE FIELD */
-	HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D = 56,
-	HNS3_RSS_FIELD_IPV6_NONFRAG_IP_S,
-	HNS3_RSS_FIELD_IPV6_FRAG_IP_D,
-	HNS3_RSS_FIELD_IPV6_FRAG_IP_S
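+/*
+ * Map rte_eth_hash_function IDs to hardware hash algorithms; DEFAULT falls
+ * back to Toeplitz and unlisted entries stay zero-initialized.
+ */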
+const uint8_t hns3_hash_func_map[] = {
+	[RTE_ETH_HASH_FUNCTION_DEFAULT] = HNS3_RSS_HASH_ALGO_TOEPLITZ,
+	[RTE_ETH_HASH_FUNCTION_TOEPLITZ] = HNS3_RSS_HASH_ALGO_TOEPLITZ,
+	[RTE_ETH_HASH_FUNCTION_SIMPLE_XOR] = HNS3_RSS_HASH_ALGO_SIMPLE,
+	[RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ] = HNS3_RSS_HASH_ALGO_SYMMETRIC_TOEP,
 };
 
 enum hns3_rss_tuple_type {
@@ -79,243 +34,285 @@
 	uint64_t rss_types;
 	uint16_t tuple_type;
 	uint64_t rss_field;
+	uint64_t tuple_mask;
 } hns3_set_tuple_table[] = {
 	/* IPV4-FRAG */
 	{ ETH_RSS_FRAG_IPV4 | ETH_RSS_L3_SRC_ONLY,
 	  HNS3_RSS_IP_TUPLE,
-	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_S) },
+	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_S),
+	  HNS3_RSS_TUPLE_IPV4_FLAG_M },
 	{ ETH_RSS_FRAG_IPV4 | ETH_RSS_L3_DST_ONLY,
 	  HNS3_RSS_IP_TUPLE,
-	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_D) },
+	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_D),
+	  HNS3_RSS_TUPLE_IPV4_FLAG_M },
 	{ ETH_RSS_FRAG_IPV4,
 	  HNS3_RSS_IP_TUPLE,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_S) |
-	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_D) },
+	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_D),
+	  HNS3_RSS_TUPLE_IPV4_FLAG_M },
 
 	/* IPV4 */
 	{ ETH_RSS_IPV4 | ETH_RSS_L3_SRC_ONLY,
 	  HNS3_RSS_IP_TUPLE,
-	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_S) },
+	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_S),
+	  HNS3_RSS_TUPLE_IPV4_NONF_M },
 	{ ETH_RSS_IPV4 | ETH_RSS_L3_DST_ONLY,
 	  HNS3_RSS_IP_TUPLE,
-	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D) },
+	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D),
+	  HNS3_RSS_TUPLE_IPV4_NONF_M },
 	{ ETH_RSS_IPV4,
 	  HNS3_RSS_IP_TUPLE,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_S) |
-	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D) },
+	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D),
+	  HNS3_RSS_TUPLE_IPV4_NONF_M },
 
 	/* IPV4-OTHER */
 	{ ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L3_SRC_ONLY,
 	  HNS3_RSS_IP_TUPLE,
-	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_S) },
+	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_S),
+	  HNS3_RSS_TUPLE_IPV4_NONF_M },
 	{ ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L3_DST_ONLY,
 	  HNS3_RSS_IP_TUPLE,
-	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D) },
+	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D),
+	  HNS3_RSS_TUPLE_IPV4_NONF_M },
 	{ ETH_RSS_NONFRAG_IPV4_OTHER,
 	  HNS3_RSS_IP_TUPLE,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_S) |
-	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D) },
+	  BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D),
+	  HNS3_RSS_TUPLE_IPV4_NONF_M },
 
 	/* IPV4-TCP */
 	{ ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_SRC_ONLY,
 	  HNS3_RSS_IP_L4_TUPLE,
-	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S) },
+	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S),
+	  HNS3_RSS_TUPLE_IPV4_TCP_M },
 	{ ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_DST_ONLY,
 	  HNS3_RSS_IP_L4_TUPLE,
-	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_D) },
+	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_D),
+	  HNS3_RSS_TUPLE_IPV4_TCP_M },
 	{ ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_SRC_ONLY,
 	  HNS3_RSS_IP_L4_TUPLE,
-	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_S) },
+	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_S),
+	  HNS3_RSS_TUPLE_IPV4_TCP_M },
 	{ ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_DST_ONLY,
 	  HNS3_RSS_IP_L4_TUPLE,
-	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_D) },
+	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_D),
+	  HNS3_RSS_TUPLE_IPV4_TCP_M },
 	{ ETH_RSS_NONFRAG_IPV4_TCP,
 	  HNS3_RSS_IP_L4_TUPLE,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_S) |
-	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_D) },
+	  BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_D),
+	  HNS3_RSS_TUPLE_IPV4_TCP_M },
 
 	/* IPV4-UDP */
 	{ ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_SRC_ONLY,
 	  HNS3_RSS_IP_L4_TUPLE,
-	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_S) },
+	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_S),
+	  HNS3_RSS_TUPLE_IPV4_UDP_M },
 	{ ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_DST_ONLY,
 	  HNS3_RSS_IP_L4_TUPLE,
-	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_D) },
+	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_D),
+	  HNS3_RSS_TUPLE_IPV4_UDP_M },
 	{ ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_SRC_ONLY,
 	  HNS3_RSS_IP_L4_TUPLE,
-	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_S) },
+	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_S),
+	  HNS3_RSS_TUPLE_IPV4_UDP_M },
 	{ ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_DST_ONLY,
 	  HNS3_RSS_IP_L4_TUPLE,
-	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_D) },
+	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_D),
+	  HNS3_RSS_TUPLE_IPV4_UDP_M },
 	{ ETH_RSS_NONFRAG_IPV4_UDP,
 	  HNS3_RSS_IP_L4_TUPLE,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_S) |
-	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_D) },
+	  BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_D),
+	  HNS3_RSS_TUPLE_IPV4_UDP_M },
 
 	/* IPV4-SCTP */
 	{ ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_SRC_ONLY,
 	  HNS3_RSS_IP_L4_TUPLE,
-	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_S) },
+	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_S),
+	  HNS3_RSS_TUPLE_IPV4_SCTP_M },
 	{ ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_DST_ONLY,
 	  HNS3_RSS_IP_L4_TUPLE,
-	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_D) },
+	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_D),
+	  HNS3_RSS_TUPLE_IPV4_SCTP_M },
 	{ ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_SRC_ONLY,
 	  HNS3_RSS_IP_L4_TUPLE,
-	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_S) },
+	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_S),
+	  HNS3_RSS_TUPLE_IPV4_SCTP_M },
 	{ ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_DST_ONLY,
 	  HNS3_RSS_IP_L4_TUPLE,
-	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_D) },
+	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_D),
+	  HNS3_RSS_TUPLE_IPV4_SCTP_M },
 	{ ETH_RSS_NONFRAG_IPV4_SCTP,
 	  HNS3_RSS_IP_L4_TUPLE,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_D) |
-	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_VER) },
+	  BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_VER),
+	  HNS3_RSS_TUPLE_IPV4_SCTP_M },
 
 	/* IPV6-FRAG */
 	{ ETH_RSS_FRAG_IPV6 | ETH_RSS_L3_SRC_ONLY,
 	  HNS3_RSS_IP_TUPLE,
-	  BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_S) },
+	  BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_S),
+	  HNS3_RSS_TUPLE_IPV6_FLAG_M },
 	{ ETH_RSS_FRAG_IPV6 | ETH_RSS_L3_DST_ONLY,
 	  HNS3_RSS_IP_TUPLE,
-	  BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_D) },
+	  BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_D),
+	  HNS3_RSS_TUPLE_IPV6_FLAG_M },
 	{ ETH_RSS_FRAG_IPV6,
 	  HNS3_RSS_IP_TUPLE,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_S) |
-	  BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_D) },
+	  BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_D),
+	  HNS3_RSS_TUPLE_IPV6_FLAG_M },
 
 	/* IPV6 */
 	{ ETH_RSS_IPV6 | ETH_RSS_L3_SRC_ONLY,
 	  HNS3_RSS_IP_TUPLE,
-	  BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_S) },
+	  BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_S),
+	  HNS3_RSS_TUPLE_IPV6_NONF_M },
 	{ ETH_RSS_IPV6 | ETH_RSS_L3_DST_ONLY,
 	  HNS3_RSS_IP_TUPLE,
-	  BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D) },
+	  BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D),
+	  HNS3_RSS_TUPLE_IPV6_NONF_M },
 	{ ETH_RSS_IPV6,
 	  HNS3_RSS_IP_TUPLE,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_S) |
-	  BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D) },
+	  BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D),
+	  HNS3_RSS_TUPLE_IPV6_NONF_M },
 
 	/* IPV6-OTHER */
 	{ ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L3_SRC_ONLY,
 	  HNS3_RSS_IP_TUPLE,
-	  BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_S) },
+	  BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_S),
+	  HNS3_RSS_TUPLE_IPV6_NONF_M },
 	{ ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L3_DST_ONLY,
 	  HNS3_RSS_IP_TUPLE,
-	  BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D) },
+	  BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D),
+	  HNS3_RSS_TUPLE_IPV6_NONF_M },
 	{ ETH_RSS_NONFRAG_IPV6_OTHER,
 	  HNS3_RSS_IP_TUPLE,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_S) |
-	  BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D) },
+	  BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D),
+	  HNS3_RSS_TUPLE_IPV6_NONF_M },
 
 	/* IPV6-TCP */
 	{ ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_SRC_ONLY,
 	  HNS3_RSS_IP_L4_TUPLE,
-	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_S) },
+	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_S),
+	  HNS3_RSS_TUPLE_IPV6_TCP_M },
 	{ ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_DST_ONLY,
 	  HNS3_RSS_IP_L4_TUPLE,
-	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_D) },
+	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_D),
+	  HNS3_RSS_TUPLE_IPV6_TCP_M },
 	{ ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_SRC_ONLY,
 	  HNS3_RSS_IP_L4_TUPLE,
-	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_S) },
+	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_S),
+	  HNS3_RSS_TUPLE_IPV6_TCP_M },
 	{ ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_DST_ONLY,
 	  HNS3_RSS_IP_L4_TUPLE,
-	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_D) },
+	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_D),
+	  HNS3_RSS_TUPLE_IPV6_TCP_M },
 	{ ETH_RSS_NONFRAG_IPV6_TCP,
 	  HNS3_RSS_IP_L4_TUPLE,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_S) |
-	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_D) },
+	  BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_D),
+	  HNS3_RSS_TUPLE_IPV6_TCP_M },
 
 	/* IPV6-UDP */
 	{ ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_SRC_ONLY,
 	  HNS3_RSS_IP_L4_TUPLE,
-	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_S) },
+	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_S),
+	  HNS3_RSS_TUPLE_IPV6_UDP_M },
 	{ ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_DST_ONLY,
 	  HNS3_RSS_IP_L4_TUPLE,
-	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_D) },
+	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_D),
+	  HNS3_RSS_TUPLE_IPV6_UDP_M },
 	{ ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_SRC_ONLY,
 	  HNS3_RSS_IP_L4_TUPLE,
-	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_S) },
+	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_S),
+	  HNS3_RSS_TUPLE_IPV6_UDP_M },
 	{ ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_DST_ONLY,
 	  HNS3_RSS_IP_L4_TUPLE,
-	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_D) },
+	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_D),
+	  HNS3_RSS_TUPLE_IPV6_UDP_M },
 	{ ETH_RSS_NONFRAG_IPV6_UDP,
 	  HNS3_RSS_IP_L4_TUPLE,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_S) |
-	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_D) },
+	  BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_D),
+	  HNS3_RSS_TUPLE_IPV6_UDP_M },
 
 	/* IPV6-SCTP */
 	{ ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_SRC_ONLY,
 	  HNS3_RSS_IP_L4_TUPLE,
-	  BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_S) },
+	  BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_S),
+	  HNS3_RSS_TUPLE_IPV6_SCTP_M },
 	{ ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_DST_ONLY,
 	  HNS3_RSS_IP_L4_TUPLE,
-	  BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_D) },
+	  BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_D),
+	  HNS3_RSS_TUPLE_IPV6_SCTP_M },
 	{ ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_SRC_ONLY,
 	  HNS3_RSS_IP_L4_TUPLE,
-	  BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_S) },
+	  BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_S),
+	  HNS3_RSS_TUPLE_IPV6_SCTP_M },
 	{ ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_DST_ONLY,
 	  HNS3_RSS_IP_L4_TUPLE,
-	  BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_D) },
+	  BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_D),
+	  HNS3_RSS_TUPLE_IPV6_SCTP_M },
 	{ ETH_RSS_NONFRAG_IPV6_SCTP,
 	  HNS3_RSS_IP_L4_TUPLE,
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_S) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_D) |
 	  BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_S) |
-	  BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_VER) },
+	  BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_VER),
+	  HNS3_RSS_TUPLE_IPV6_SCTP_M },
 };
 
 /*
  * rss_generic_config command function, opcode:0x0D01.
- * Used to set algorithm, key_offset and hash key of rss.
+ * Used to set algorithm and hash key of RSS.
  */
 int
-hns3_set_rss_algo_key(struct hns3_hw *hw, const uint8_t *key)
+hns3_set_rss_algo_key(struct hns3_hw *hw, uint8_t hash_algo,
+		      const uint8_t *key, uint8_t key_len)
 {
-#define HNS3_KEY_OFFSET_MAX	3
-#define HNS3_SET_HASH_KEY_BYTE_FOUR	2
-
 	struct hns3_rss_generic_config_cmd *req;
 	struct hns3_cmd_desc desc;
-	uint32_t key_offset, key_size;
-	const uint8_t *key_cur;
-	uint8_t cur_offset;
+	const uint8_t *cur_key;
+	uint16_t cur_key_size;
+	uint16_t max_bd_num;
+	uint16_t idx;
 	int ret;
 
 	req = (struct hns3_rss_generic_config_cmd *)desc.data;
 
-	/*
-	 * key_offset=0, hash key byte0~15 is set to hardware.
-	 * key_offset=1, hash key byte16~31 is set to hardware.
-	 * key_offset=2, hash key byte32~39 is set to hardware.
-	 */
-	for (key_offset = 0; key_offset < HNS3_KEY_OFFSET_MAX; key_offset++) {
+	max_bd_num = DIV_ROUND_UP(key_len, HNS3_RSS_HASH_KEY_NUM);
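+	/*
+	 * Write the key in HNS3_RSS_HASH_KEY_NUM-byte chunks, one command
+	 * descriptor per chunk: e.g. a 40-byte key split into 16-byte chunks
+	 * needs max_bd_num = 3 descriptors, the last carrying only 8 bytes.
+	 */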
+	for (idx = 0; idx < max_bd_num; idx++) {
 		hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RSS_GENERIC_CONFIG,
 					  false);
 
-		req->hash_config |=
-			(hw->rss_info.hash_algo & HNS3_RSS_HASH_ALGO_MASK);
-		req->hash_config |= (key_offset << HNS3_RSS_HASH_KEY_OFFSET_B);
-
-		if (key_offset == HNS3_SET_HASH_KEY_BYTE_FOUR)
-			key_size = HNS3_RSS_KEY_SIZE - HNS3_RSS_HASH_KEY_NUM *
-			HNS3_SET_HASH_KEY_BYTE_FOUR;
+		req->hash_config |= (hash_algo & HNS3_RSS_HASH_ALGO_MASK);
+		req->hash_config |= (idx << HNS3_RSS_HASH_KEY_OFFSET_B);
+
+		if (idx == max_bd_num - 1 &&
+		    (key_len % HNS3_RSS_HASH_KEY_NUM) != 0)
+			cur_key_size = key_len % HNS3_RSS_HASH_KEY_NUM;
 		else
-			key_size = HNS3_RSS_HASH_KEY_NUM;
+			cur_key_size = HNS3_RSS_HASH_KEY_NUM;
 
-		cur_offset = key_offset * HNS3_RSS_HASH_KEY_NUM;
-		key_cur = key + cur_offset;
-		memcpy(req->hash_key, key_cur, key_size);
+		cur_key = key + idx * HNS3_RSS_HASH_KEY_NUM;
+		memcpy(req->hash_key, cur_key, cur_key_size);
 
 		ret = hns3_cmd_send(hw, &desc, 1);
 		if (ret) {
@@ -323,8 +320,49 @@
 			return ret;
 		}
 	}
-	/* Update the shadow RSS key with user specified */
-	memcpy(hw->rss_info.key, key, HNS3_RSS_KEY_SIZE);
+
+	return 0;
+}
+
+int
+hns3_rss_get_algo_key(struct hns3_hw *hw, uint8_t *hash_algo,
+		      uint8_t *key, uint8_t key_len)
+{
+	struct hns3_rss_generic_config_cmd *req;
+	struct hns3_cmd_desc desc;
+	uint16_t cur_key_size;
+	uint16_t max_bd_num;
+	uint8_t *cur_key;
+	uint16_t idx;
+	int ret;
+
+	req = (struct hns3_rss_generic_config_cmd *)desc.data;
+	max_bd_num = DIV_ROUND_UP(key_len, HNS3_RSS_HASH_KEY_NUM);
+	for (idx = 0; idx < max_bd_num; idx++) {
+		hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RSS_GENERIC_CONFIG,
+					  true);
+
+		req->hash_config |= (idx << HNS3_RSS_HASH_KEY_OFFSET_B);
+		ret = hns3_cmd_send(hw, &desc, 1);
+		if (ret) {
+			hns3_err(hw, "fail to obtain RSS algo and key from firmware, ret = %d",
+				 ret);
+			return ret;
+		}
+
+		if (idx == 0)
+			*hash_algo = req->hash_config & HNS3_RSS_HASH_ALGO_MASK;
+
+		if (idx == max_bd_num - 1 &&
+		    (key_len % HNS3_RSS_HASH_KEY_NUM) != 0)
+			cur_key_size = key_len % HNS3_RSS_HASH_KEY_NUM;
+		else
+			cur_key_size = HNS3_RSS_HASH_KEY_NUM;
+
+		cur_key = key + idx * HNS3_RSS_HASH_KEY_NUM;
+		memcpy(cur_key, req->hash_key, cur_key_size);
+	}
+
 	return 0;
 }
 
@@ -336,6 +374,7 @@
 hns3_set_rss_indir_table(struct hns3_hw *hw, uint16_t *indir, uint16_t size)
 {
 	struct hns3_rss_indirection_table_cmd *req;
+	uint16_t max_bd_num, cfg_tbl_size;
 	struct hns3_cmd_desc desc;
 	uint8_t qid_msb_off;
 	uint8_t qid_msb_val;
@@ -344,14 +383,20 @@
 	int ret;
 
 	req = (struct hns3_rss_indirection_table_cmd *)desc.data;
-
-	for (i = 0; i < size / HNS3_RSS_CFG_TBL_SIZE; i++) {
+	max_bd_num = DIV_ROUND_UP(size, HNS3_RSS_CFG_TBL_SIZE);
+	for (i = 0; i < max_bd_num; i++) {
 		hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RSS_INDIR_TABLE,
 					  false);
 		req->start_table_index =
 				rte_cpu_to_le_16(i * HNS3_RSS_CFG_TBL_SIZE);
 		req->rss_set_bitmap = rte_cpu_to_le_16(HNS3_RSS_SET_BITMAP_MSK);
-		for (j = 0; j < HNS3_RSS_CFG_TBL_SIZE; j++) {
+
+		if (i == max_bd_num - 1 && (size % HNS3_RSS_CFG_TBL_SIZE) != 0)
+			cfg_tbl_size = size % HNS3_RSS_CFG_TBL_SIZE;
+		else
+			cfg_tbl_size = HNS3_RSS_CFG_TBL_SIZE;
+
+		for (j = 0; j < cfg_tbl_size; j++) {
 			q_id = indir[i * HNS3_RSS_CFG_TBL_SIZE + j];
 			req->rss_result_l[j] = q_id & 0xff;
 
@@ -372,9 +417,53 @@
 		}
 	}
 
-	/* Update redirection table of hw */
-	memcpy(hw->rss_info.rss_indirection_tbl, indir,
-	       sizeof(uint16_t) * size);
+	return 0;
+}
+
+static int
+hns3_get_rss_indir_table(struct hns3_hw *hw, uint16_t *indir, uint16_t size)
+{
+	struct hns3_rss_indirection_table_cmd *req;
+	uint16_t max_bd_num, cfg_tbl_size;
+	uint8_t qid_msb_off, qid_msb_idx;
+	struct hns3_cmd_desc desc;
+	uint16_t q_id, q_hi, q_lo;
+	uint8_t rss_result_h;
+	uint16_t i, j;
+	int ret;
+
+	req = (struct hns3_rss_indirection_table_cmd *)desc.data;
+	max_bd_num = DIV_ROUND_UP(size, HNS3_RSS_CFG_TBL_SIZE);
+	for (i = 0; i < max_bd_num; i++) {
+		hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RSS_INDIR_TABLE,
+					  true);
+		req->start_table_index =
+				rte_cpu_to_le_16(i * HNS3_RSS_CFG_TBL_SIZE);
+		ret = hns3_cmd_send(hw, &desc, 1);
+		if (ret) {
+			hns3_err(hw, "fail to get RSS indirection table from firmware, ret = %d",
+				 ret);
+			return ret;
+		}
+
+		if (i == max_bd_num - 1 && (size % HNS3_RSS_CFG_TBL_SIZE) != 0)
+			cfg_tbl_size = size % HNS3_RSS_CFG_TBL_SIZE;
+		else
+			cfg_tbl_size = HNS3_RSS_CFG_TBL_SIZE;
+
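+		/*
+		 * Each queue id is stored as an 8-bit low part in
+		 * rss_result_l[j] plus HNS3_RSS_CFG_TBL_BW_H high-order bits
+		 * packed into rss_result_h[]; reassemble the two parts here.
+		 */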
+		for (j = 0; j < cfg_tbl_size; j++) {
+			qid_msb_idx =
+				j * HNS3_RSS_CFG_TBL_BW_H / HNS3_BITS_PER_BYTE;
+			rss_result_h = req->rss_result_h[qid_msb_idx];
+			qid_msb_off =
+				j * HNS3_RSS_CFG_TBL_BW_H % HNS3_BITS_PER_BYTE;
+			q_hi = (rss_result_h >> qid_msb_off) &
+						HNS3_RSS_CFG_TBL_BW_H_M;
+			q_lo = req->rss_result_l[j];
+			q_id = (q_hi << HNS3_RSS_CFG_TBL_BW_L) | q_lo;
+			indir[i * HNS3_RSS_CFG_TBL_SIZE + j] = q_id;
+		}
+	}
 
 	return 0;
 }
@@ -393,41 +482,72 @@
 	}
 
 	ret = hns3_set_rss_indir_table(hw, lut, hw->rss_ind_tbl_size);
-	if (ret)
-		hns3_err(hw, "RSS uninit indir table failed: %d", ret);
+	if (ret != 0)
+		hns3_err(hw, "RSS uninit indir table failed, ret = %d.", ret);
+	else
+		memcpy(hw->rss_info.rss_indirection_tbl, lut,
+		       sizeof(uint16_t) * hw->rss_ind_tbl_size);
 	rte_free(lut);
 
 	return ret;
 }
 
-static void
-hns3_rss_check_l3l4_types(struct hns3_hw *hw, uint64_t rss_hf)
+bool
+hns3_check_rss_types_valid(struct hns3_hw *hw, uint64_t types)
 {
 	uint64_t ip_mask = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
 			   ETH_RSS_NONFRAG_IPV4_OTHER |
 			   ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
 			   ETH_RSS_NONFRAG_IPV6_OTHER;
-	uint64_t l4_mask = ETH_RSS_NONFRAG_IPV4_TCP |
+	uint64_t ip_l4_mask = ETH_RSS_NONFRAG_IPV4_TCP |
 			   ETH_RSS_NONFRAG_IPV4_UDP |
 			   ETH_RSS_NONFRAG_IPV4_SCTP |
 			   ETH_RSS_NONFRAG_IPV6_TCP |
 			   ETH_RSS_NONFRAG_IPV6_UDP |
 			   ETH_RSS_NONFRAG_IPV6_SCTP;
-	uint64_t l3_src_dst_mask = ETH_RSS_L3_SRC_ONLY |
-				   ETH_RSS_L3_DST_ONLY;
-	uint64_t l4_src_dst_mask = ETH_RSS_L4_SRC_ONLY |
-				   ETH_RSS_L4_DST_ONLY;
-
-	if (rss_hf & l3_src_dst_mask &&
-	    !(rss_hf & ip_mask || rss_hf & l4_mask))
-		hns3_warn(hw, "packet type isn't specified, L3_SRC/DST_ONLY is ignored.");
+	bool has_l4_src_dst = !!(types & HNS3_RSS_SUPPORT_L4_SRC_DST);
+	bool has_ip_pkt = !!(types & ip_mask);
+	uint64_t final_types;
+
+	if (types == 0)
+		return true;
+
+	if ((types & HNS3_ETH_RSS_SUPPORT) == 0) {
+		hns3_err(hw, "specified types(0x%" PRIx64 ") are unsupported.",
+			 types);
+		return false;
+	}
+
+	if ((types & HNS3_RSS_SUPPORT_L3_SRC_DST) != 0 &&
+	    (types & HNS3_RSS_SUPPORT_FLOW_TYPE) == 0) {
+		hns3_err(hw, "IP or IP-TCP/UDP/SCTP packet type isn't specified, L3_SRC/DST_ONLY cannot be set.");
+		return false;
+	}
+
+	if (has_l4_src_dst && (types & ip_l4_mask) == 0) {
+		if (!has_ip_pkt) {
+			hns3_err(hw, "IP-TCP/UDP/SCTP packet type isn't specified, L4_SRC/DST_ONLY cannot be set.");
+			return false;
+		}
+		/*
+		 * If the types include L4_SRC/DST_ONLY but no IP-TCP/UDP/SCTP
+		 * packet type, they are still considered valid when an IP
+		 * packet type is also present.
+		 */
+		hns3_warn(hw, "L4_SRC/DST_ONLY is ignored because no L4 packet type is included.");
+	}
 
-	if (rss_hf & l4_src_dst_mask && !(rss_hf & l4_mask))
-		hns3_warn(hw, "packet type isn't specified, L4_SRC/DST_ONLY is ignored.");
+	if ((types & ~HNS3_ETH_RSS_SUPPORT) != 0) {
+		final_types = types & HNS3_ETH_RSS_SUPPORT;
+		hns3_warn(hw, "set RSS types based on hardware support, requested:0x%" PRIx64 " configured:0x%" PRIx64,
+			  types, final_types);
+	}
+
+	return true;
 }
 
-static uint64_t
-hns3_rss_calc_tuple_filed(struct hns3_hw *hw, uint64_t rss_hf)
+uint64_t
+hns3_rss_calc_tuple_filed(uint64_t rss_hf)
 {
 	uint64_t l3_only_mask = ETH_RSS_L3_SRC_ONLY |
 				ETH_RSS_L3_DST_ONLY;
@@ -456,34 +576,40 @@
 		    !has_l3_l4_only)
 			tuple |= hns3_set_tuple_table[i].rss_field;
 	}
-	hns3_rss_check_l3l4_types(hw, rss_hf);
 
 	return tuple;
 }
 
 int
-hns3_set_rss_tuple_by_rss_hf(struct hns3_hw *hw, uint64_t rss_hf)
+hns3_set_rss_tuple_field(struct hns3_hw *hw, uint64_t tuple_fields)
 {
 	struct hns3_rss_input_tuple_cmd *req;
 	struct hns3_cmd_desc desc;
-	uint64_t tuple_field;
 	int ret;
 
 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RSS_INPUT_TUPLE, false);
 	req = (struct hns3_rss_input_tuple_cmd *)desc.data;
-
-	tuple_field = hns3_rss_calc_tuple_filed(hw, rss_hf);
-	req->tuple_field = rte_cpu_to_le_64(tuple_field);
+	req->tuple_field = rte_cpu_to_le_64(tuple_fields);
 	ret = hns3_cmd_send(hw, &desc, 1);
-	if (ret) {
-		hns3_err(hw, "Update RSS flow types tuples failed %d", ret);
-		return ret;
-	}
+	if (ret != 0)
+		hns3_err(hw, "set RSS hash tuple fields failed, ret = %d", ret);
 
-	/* Update supported flow types when set tuple success */
-	hw->rss_info.conf.types = rss_hf;
+	return ret;
+}
 
-	return 0;
+int
+hns3_set_rss_tuple_by_rss_hf(struct hns3_hw *hw, uint64_t rss_hf)
+{
+	uint64_t tuple_fields;
+	int ret;
+
+	tuple_fields = hns3_rss_calc_tuple_filed(rss_hf);
+	ret = hns3_set_rss_tuple_field(hw, tuple_fields);
+	if (ret != 0)
+		hns3_err(hw, "Update RSS flow types tuples failed, ret = %d",
+			 ret);
+
+	return ret;
 }
 
 /*
@@ -500,28 +626,35 @@
 			 struct rte_eth_rss_conf *rss_conf)
 {
 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	uint64_t rss_hf_bk = hw->rss_info.conf.types;
+	uint64_t rss_hf_bk = hw->rss_info.rss_hf;
 	uint8_t key_len = rss_conf->rss_key_len;
 	uint64_t rss_hf = rss_conf->rss_hf;
 	uint8_t *key = rss_conf->rss_key;
 	int ret;
 
-	if (key && key_len != HNS3_RSS_KEY_SIZE) {
+	if (key && key_len != hw->rss_key_size) {
 		hns3_err(hw, "the hash key len(%u) is invalid, must be %u",
-			 key_len, HNS3_RSS_KEY_SIZE);
+			 key_len, hw->rss_key_size);
 		return -EINVAL;
 	}
 
+	if (!hns3_check_rss_types_valid(hw, rss_hf))
+		return -EINVAL;
+
 	rte_spinlock_lock(&hw->lock);
 	ret = hns3_set_rss_tuple_by_rss_hf(hw, rss_hf);
 	if (ret)
 		goto set_tuple_fail;
 
 	if (key) {
-		ret = hns3_set_rss_algo_key(hw, key);
+		ret = hns3_set_rss_algo_key(hw, hw->rss_info.hash_algo,
+					    key, hw->rss_key_size);
 		if (ret)
 			goto set_algo_key_fail;
+		/* Update the shadow RSS key with the user-specified key */
+		memcpy(hw->rss_info.key, key, hw->rss_key_size);
 	}
+	hw->rss_info.rss_hf = rss_hf;
 	rte_spinlock_unlock(&hw->lock);
 
 	return 0;
@@ -533,6 +666,96 @@
 	return ret;
 }
 
+int
+hns3_get_rss_tuple_field(struct hns3_hw *hw, uint64_t *tuple_fields)
+{
+	struct hns3_rss_input_tuple_cmd *req;
+	struct hns3_cmd_desc desc;
+	int ret;
+
+	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RSS_INPUT_TUPLE, true);
+	req = (struct hns3_rss_input_tuple_cmd *)desc.data;
+	ret = hns3_cmd_send(hw, &desc, 1);
+	if (ret != 0) {
+		hns3_err(hw, "fail to get RSS hash tuple fields from firmware, ret = %d",
+			 ret);
+		return ret;
+	}
+
+	*tuple_fields = rte_le_to_cpu_64(req->tuple_field);
+
+	return 0;
+}
+
+static uint64_t
+hns3_rss_tuple_fields_to_rss_hf(struct hns3_hw *hw, uint64_t tuple_fields)
+{
+	uint64_t ipv6_sctp_l4_mask =
+				BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_D) |
+				BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_S);
+	uint64_t rss_hf = 0;
+	uint64_t tuple_mask;
+	uint32_t i;
+
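+	/*
+	 * Reverse-map the hardware tuple fields to ETH_RSS_* flags by walking
+	 * hns3_set_tuple_table and reporting each entry whose enabled tuple
+	 * bits match the configured fields.
+	 */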
+	for (i = 0; i < RTE_DIM(hns3_set_tuple_table); i++) {
+		tuple_mask = hns3_set_tuple_table[i].tuple_mask;
+		/*
+		 * RSS hash is disabled for the packet type if its tuple fields
+		 * are zero.
+		 */
+		if ((tuple_fields & tuple_mask) == 0)
+			continue;
+
+		/*
+		 * Some hardware doesn't support using src/dst port fields to
+		 * hash IPV6-SCTP packets.
+		 */
+		if ((hns3_set_tuple_table[i].rss_types &
+					ETH_RSS_NONFRAG_IPV6_SCTP) &&
+		    !hw->rss_info.ipv6_sctp_offload_supported)
+			tuple_mask &= ~ipv6_sctp_l4_mask;
+
+		/*
+		 * The framework (ethdev ops) or driver (rte flow API) ensures
+		 * that L3_SRC/DST_ONLY and L4_SRC/DST_ONLY cannot both be
+		 * passed to the driver at the same time. But if the user
+		 * doesn't specify any L3/L4_SRC/DST_ONLY, the driver enables
+		 * all tuple fields. In this case, the driver should not report
+		 * L3/L4_SRC/DST_ONLY.
+		 */
+		if ((tuple_fields & tuple_mask) == tuple_mask) {
+			/* Skip entries that enable only part of the tuples. */
+			if ((tuple_fields & hns3_set_tuple_table[i].rss_field) !=
+					tuple_mask)
+				continue;
+
+			rss_hf |= hns3_set_tuple_table[i].rss_types;
+			continue;
+		}
+
+		/* Match entries that enable part of the tuples. */
+		if ((tuple_fields & hns3_set_tuple_table[i].rss_field) ==
+					hns3_set_tuple_table[i].rss_field)
+			rss_hf |= hns3_set_tuple_table[i].rss_types;
+	}
+
+	return rss_hf;
+}
+
+static int
+hns3_rss_hash_get_rss_hf(struct hns3_hw *hw, uint64_t *rss_hf)
+{
+	uint64_t tuple_fields;
+	int ret;
+
+	ret = hns3_get_rss_tuple_field(hw, &tuple_fields);
+	if (ret != 0)
+		return ret;
+
+	*rss_hf = hns3_rss_tuple_fields_to_rss_hf(hw, tuple_fields);
+
+	return 0;
+}
+
 /*
  * Get rss key and rss_hf types set of RSS hash configuration.
  * @param dev
@@ -548,19 +771,32 @@
 {
 	struct hns3_adapter *hns = dev->data->dev_private;
 	struct hns3_hw *hw = &hns->hw;
-	struct hns3_rss_conf *rss_cfg = &hw->rss_info;
+	uint8_t hash_algo;
+	int ret;
 
 	rte_spinlock_lock(&hw->lock);
-	rss_conf->rss_hf = rss_cfg->conf.types;
+	ret = hns3_rss_hash_get_rss_hf(hw, &rss_conf->rss_hf);
+	if (ret != 0) {
+		hns3_err(hw, "obtain hash tuples failed, ret = %d", ret);
+		goto out;
+	}
 
 	/* Get the RSS Key required by the user */
-	if (rss_conf->rss_key && rss_conf->rss_key_len >= HNS3_RSS_KEY_SIZE) {
-		memcpy(rss_conf->rss_key, rss_cfg->key, HNS3_RSS_KEY_SIZE);
-		rss_conf->rss_key_len = HNS3_RSS_KEY_SIZE;
+	if (rss_conf->rss_key && rss_conf->rss_key_len >= hw->rss_key_size) {
+		ret = hns3_rss_get_algo_key(hw, &hash_algo, rss_conf->rss_key,
+					    hw->rss_key_size);
+		if (ret != 0) {
+			hns3_err(hw, "obtain hash algo and key failed, ret = %d",
+				 ret);
+			goto out;
+		}
+		rss_conf->rss_key_len = hw->rss_key_size;
 	}
+
+out:
 	rte_spinlock_unlock(&hw->lock);
 
-	return 0;
+	return ret;
 }
 
 /*
@@ -600,12 +836,12 @@
 		idx = i / RTE_RETA_GROUP_SIZE;
 		shift = i % RTE_RETA_GROUP_SIZE;
 		if (reta_conf[idx].reta[shift] >= hw->alloc_rss_size) {
-			rte_spinlock_unlock(&hw->lock);
 			hns3_err(hw, "queue id(%u) set to redirection table "
 				 "exceeds queue number(%u) allocated to a TC",
 				 reta_conf[idx].reta[shift],
 				 hw->alloc_rss_size);
-			return -EINVAL;
+			ret = -EINVAL;
+			goto out;
 		}
 
 		if (reta_conf[idx].mask & (1ULL << shift))
@@ -614,7 +850,13 @@
 
 	ret = hns3_set_rss_indir_table(hw, indirection_tbl,
 				       hw->rss_ind_tbl_size);
+	if (ret != 0)
+		goto out;
+
+	memcpy(rss_cfg->rss_indirection_tbl, indirection_tbl,
+	       sizeof(uint16_t) * hw->rss_ind_tbl_size);
 
+out:
 	rte_spinlock_unlock(&hw->lock);
 	return ret;
 }
@@ -636,10 +878,11 @@
 			uint16_t reta_size)
 {
 	struct hns3_adapter *hns = dev->data->dev_private;
+	uint16_t reta_table[HNS3_RSS_IND_TBL_SIZE_MAX];
 	struct hns3_hw *hw = &hns->hw;
-	struct hns3_rss_conf *rss_cfg = &hw->rss_info;
 	uint16_t idx, shift;
 	uint16_t i;
+	int ret;
 
 	if (reta_size != hw->rss_ind_tbl_size) {
 		hns3_err(hw, "The size of hash lookup table configured (%u)"
@@ -648,14 +891,22 @@
 		return -EINVAL;
 	}
 	rte_spinlock_lock(&hw->lock);
+	ret = hns3_get_rss_indir_table(hw, reta_table, reta_size);
+	if (ret != 0) {
+		rte_spinlock_unlock(&hw->lock);
+		hns3_err(hw, "query RSS redirection table failed, ret = %d.",
+			 ret);
+		return ret;
+	}
+	rte_spinlock_unlock(&hw->lock);
+
 	for (i = 0; i < reta_size; i++) {
 		idx = i / RTE_RETA_GROUP_SIZE;
 		shift = i % RTE_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift))
-			reta_conf[idx].reta[shift] =
-						rss_cfg->rss_indirection_tbl[i];
+			reta_conf[idx].reta[shift] = reta_table[i];
 	}
-	rte_spinlock_unlock(&hw->lock);
+
 	return 0;
 }
 
@@ -733,6 +984,52 @@
 	return ret;
 }
 
+/*
+ * Note: the 'hash_func' parameter is defined by enum rte_eth_hash_function.
+ */
+int
+hns3_update_rss_algo_key(struct hns3_hw *hw, uint8_t hash_func,
+			 uint8_t *key, uint8_t key_len)
+{
+	uint8_t rss_key[HNS3_RSS_KEY_SIZE_MAX] = {0};
+	bool modify_key, modify_algo;
+	uint8_t hash_algo;
+	int ret;
+
+	modify_key = (key != NULL && key_len > 0);
+	modify_algo = hash_func != RTE_ETH_HASH_FUNCTION_DEFAULT;
+	if (!modify_key && !modify_algo)
+		return 0;
+
+	if (modify_algo && hash_func >= RTE_DIM(hns3_hash_func_map)) {
+		hns3_err(hw, "hash func (%u) is unsupported.", hash_func);
+		return -ENOTSUP;
+	}
+	if (modify_key && key_len != hw->rss_key_size) {
+		hns3_err(hw, "hash key length (%u) is invalid.", key_len);
+		return -EINVAL;
+	}
+
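+	/*
+	 * The firmware command sets the algorithm and key together, so read
+	 * the current pair first and overlay only the parts being modified.
+	 */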
+	ret = hns3_rss_get_algo_key(hw, &hash_algo, rss_key, hw->rss_key_size);
+	if (ret != 0) {
+		hns3_err(hw, "fail to get RSS hash algorithm and key, ret = %d",
+			 ret);
+		return ret;
+	}
+
+	if (modify_algo)
+		hash_algo = hns3_hash_func_map[hash_func];
+	if (modify_key)
+		memcpy(rss_key, key, key_len);
+
+	ret = hns3_set_rss_algo_key(hw, hash_algo, rss_key, hw->rss_key_size);
+	if (ret != 0)
+		hns3_err(hw, "fail to set RSS hash algorithm and key, ret = %d",
+			 ret);
+
+	return ret;
+}
+
 static void
 hns3_rss_tuple_uninit(struct hns3_hw *hw)
 {
@@ -764,10 +1061,11 @@
 	int i;
 
 	/* Default hash algorithm */
-	rss_cfg->conf.func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
+	rss_cfg->hash_algo = HNS3_RSS_HASH_ALGO_TOEPLITZ;
 
-	/* Default RSS key */
-	memcpy(rss_cfg->key, hns3_hash_key, HNS3_RSS_KEY_SIZE);
+	hw->rss_info.rss_hf = 0;
+	memcpy(rss_cfg->key, hns3_hash_key,
+		RTE_MIN(sizeof(hns3_hash_key), hw->rss_key_size));
 
 	/* Initialize RSS indirection table */
 	for (i = 0; i < hw->rss_ind_tbl_size; i++)
@@ -788,20 +1086,8 @@
 
 	enum rte_eth_rx_mq_mode mq_mode = hw->data->dev_conf.rxmode.mq_mode;
 
-	switch (hw->rss_info.conf.func) {
-	case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR:
-		hw->rss_info.hash_algo = HNS3_RSS_HASH_ALGO_SIMPLE;
-		break;
-	case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ:
-		hw->rss_info.hash_algo = HNS3_RSS_HASH_ALGO_SYMMETRIC_TOEP;
-		break;
-	default:
-		hw->rss_info.hash_algo = HNS3_RSS_HASH_ALGO_TOEPLITZ;
-		break;
-	}
-
-	/* Configure RSS hash algorithm and hash key offset */
-	ret = hns3_set_rss_algo_key(hw, hash_key);
+	ret = hns3_set_rss_algo_key(hw, rss_cfg->hash_algo,
+				    hash_key, hw->rss_key_size);
 	if (ret)
 		return ret;
 
@@ -815,15 +1101,22 @@
 		return ret;
 
 	/*
-	 * When muli-queue RSS mode flag is not set or unsupported tuples are
+	 * When multi-queue RSS mode flag is not set or unsupported tuples are
 	 * set, disable all tuples.
 	 */
-	rss_hf = hw->rss_info.conf.types;
+	rss_hf = hw->rss_info.rss_hf;
 	if (!((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) ||
 	    !(rss_hf & HNS3_ETH_RSS_SUPPORT))
 		rss_hf = 0;
 
-	return hns3_set_rss_tuple_by_rss_hf(hw, rss_hf);
+	ret = hns3_set_rss_tuple_by_rss_hf(hw, rss_hf);
+	if (ret != 0) {
+		hns3_err(hw, "set RSS tuples failed, ret = %d.", ret);
+		return ret;
+	}
+	hw->rss_info.rss_hf = rss_hf;
+
+	return 0;
 }
 
 /*
@@ -841,5 +1134,5 @@
 		return;
 
 	/* Disable RSS */
-	hw->rss_info.conf.types = 0;
+	hw->rss_info.rss_hf = 0;
 }
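
[Note for reviewers] The new hns3_update_rss_algo_key() helper above is a read-modify-write: it fetches the current hash algorithm and key from hardware, overlays only the fields the caller supplied, and writes the merged state back, so an algo-only or key-only update cannot clobber the other half. A minimal standalone sketch of the pattern, with illustrative dev_* stand-ins rather than the real hns3 firmware calls:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define KEY_SIZE 40

/* Illustrative stand-ins for the firmware get/set commands. */
static uint8_t hw_algo;
static uint8_t hw_key[KEY_SIZE];

static int dev_get_algo_key(uint8_t *algo, uint8_t *key)
{
	*algo = hw_algo;
	memcpy(key, hw_key, KEY_SIZE);
	return 0;
}

static int dev_set_algo_key(uint8_t algo, const uint8_t *key)
{
	hw_algo = algo;
	memcpy(hw_key, key, KEY_SIZE);
	return 0;
}

/* Read-modify-write: touch only what the caller asked to change. */
static int update_algo_key(int modify_algo, uint8_t new_algo,
			   const uint8_t *new_key, uint8_t key_len)
{
	uint8_t key[KEY_SIZE];
	uint8_t algo;
	int ret;

	if (!modify_algo && new_key == NULL)
		return 0;			/* nothing to do */
	if (new_key != NULL && key_len != KEY_SIZE)
		return -1;			/* wrong-sized key rejected */

	ret = dev_get_algo_key(&algo, key);	/* read */
	if (ret != 0)
		return ret;
	if (modify_algo)
		algo = new_algo;		/* modify */
	if (new_key != NULL)
		memcpy(key, new_key, key_len);
	return dev_set_algo_key(algo, key);	/* write back */
}

int main(void)
{
	uint8_t key[KEY_SIZE] = { 0xaa };

	update_algo_key(1, 2, NULL, 0);		/* algorithm only */
	update_algo_key(0, 0, key, sizeof(key));/* key only */
	printf("algo=%u key[0]=%#x\n", (unsigned)hw_algo, (unsigned)hw_key[0]);
	return 0;
}
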
diff -Nru dpdk-20.11.7/drivers/net/hns3/hns3_rss.h dpdk-20.11.8/drivers/net/hns3/hns3_rss.h
--- dpdk-20.11.7/drivers/net/hns3/hns3_rss.h	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/net/hns3/hns3_rss.h	2023-04-27 18:57:22.000000000 +0100
@@ -8,27 +8,107 @@
 #include <rte_ethdev.h>
 #include <rte_flow.h>
 
-#define HNS3_ETH_RSS_SUPPORT ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_FRAG_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV4_OTHER | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_FRAG_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP | \
-	ETH_RSS_NONFRAG_IPV6_OTHER | \
-	ETH_RSS_L3_SRC_ONLY | \
-	ETH_RSS_L3_DST_ONLY | \
-	ETH_RSS_L4_SRC_ONLY | \
-	ETH_RSS_L4_DST_ONLY)
+#define HNS3_RSS_SUPPORT_L3_SRC_DST	(ETH_RSS_L3_SRC_ONLY | \
+					 ETH_RSS_L3_DST_ONLY)
+#define HNS3_RSS_SUPPORT_L4_SRC_DST	(ETH_RSS_L4_SRC_ONLY | \
+					 ETH_RSS_L4_DST_ONLY)
+#define HNS3_RSS_SUPPORT_L3L4		(HNS3_RSS_SUPPORT_L3_SRC_DST | \
+					 HNS3_RSS_SUPPORT_L4_SRC_DST)
+
+#define HNS3_RSS_SUPPORT_FLOW_TYPE	(ETH_RSS_IPV4 | \
+					 ETH_RSS_FRAG_IPV4 | \
+					 ETH_RSS_NONFRAG_IPV4_TCP | \
+					 ETH_RSS_NONFRAG_IPV4_UDP | \
+					 ETH_RSS_NONFRAG_IPV4_SCTP | \
+					 ETH_RSS_NONFRAG_IPV4_OTHER | \
+					 ETH_RSS_IPV6 | \
+					 ETH_RSS_FRAG_IPV6 | \
+					 ETH_RSS_NONFRAG_IPV6_TCP | \
+					 ETH_RSS_NONFRAG_IPV6_UDP | \
+					 ETH_RSS_NONFRAG_IPV6_SCTP | \
+					 ETH_RSS_NONFRAG_IPV6_OTHER)
+
+#define HNS3_ETH_RSS_SUPPORT		(HNS3_RSS_SUPPORT_FLOW_TYPE | \
+					 HNS3_RSS_SUPPORT_L3L4)
+
+enum hns3_tuple_field {
+	/* IPV4_TCP ENABLE FIELD */
+	HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_D = 0,
+	HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_S,
+	HNS3_RSS_FIELD_IPV4_TCP_EN_IP_D,
+	HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S,
+
+	/* IPV4_UDP ENABLE FIELD */
+	HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_D = 8,
+	HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_S,
+	HNS3_RSS_FIELD_IPV4_UDP_EN_IP_D,
+	HNS3_RSS_FIELD_IPV4_UDP_EN_IP_S,
+
+	/* IPV4_SCTP ENABLE FIELD */
+	HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_D = 16,
+	HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_S,
+	HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_D,
+	HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_S,
+	HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_VER,
+
+	/* IPV4 ENABLE FIELD */
+	HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D = 24,
+	HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_S,
+	HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_D,
+	HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_S,
+
+	/* IPV6_TCP ENABLE FIELD */
+	HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_D = 32,
+	HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_S,
+	HNS3_RSS_FIELD_IPV6_TCP_EN_IP_D,
+	HNS3_RSS_FIELD_IPV6_TCP_EN_IP_S,
+
+	/* IPV6_UDP ENABLE FIELD */
+	HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_D = 40,
+	HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_S,
+	HNS3_RSS_FIELD_IPV6_UDP_EN_IP_D,
+	HNS3_RSS_FIELD_IPV6_UDP_EN_IP_S,
+
+	/* IPV6_SCTP ENABLE FIELD */
+	HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_D = 48,
+	HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_S,
+	HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_D,
+	HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_S,
+	HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_VER,
+
+	/* IPV6 ENABLE FIELD */
+	HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D = 56,
+	HNS3_RSS_FIELD_IPV6_NONFRAG_IP_S,
+	HNS3_RSS_FIELD_IPV6_FRAG_IP_D,
+	HNS3_RSS_FIELD_IPV6_FRAG_IP_S
+};
+
+#define HNS3_RSS_PCTYPE_IPV4_TCP	BIT_ULL(0)
+#define HNS3_RSS_PCTYPE_IPV4_UDP	BIT_ULL(8)
+#define HNS3_RSS_PCTYPE_IPV4_SCTP	BIT_ULL(16)
+#define HNS3_RSS_PCTYPE_IPV4_NONF	BIT_ULL(24)
+#define HNS3_RSS_PCTYPE_IPV4_FLAG	BIT_ULL(26)
+#define HNS3_RSS_PCTYPE_IPV6_TCP	BIT_ULL(32)
+#define HNS3_RSS_PCTYPE_IPV6_UDP	BIT_ULL(40)
+#define HNS3_RSS_PCTYPE_IPV6_SCTP	BIT_ULL(48)
+#define HNS3_RSS_PCTYPE_IPV6_NONF	BIT_ULL(56)
+#define HNS3_RSS_PCTYPE_IPV6_FLAG	BIT_ULL(58)
+
+#define HNS3_RSS_TUPLE_IPV4_TCP_M	GENMASK(3, 0)
+#define HNS3_RSS_TUPLE_IPV4_UDP_M	GENMASK(11, 8)
+#define HNS3_RSS_TUPLE_IPV4_SCTP_M	GENMASK(20, 16)
+#define HNS3_RSS_TUPLE_IPV4_NONF_M	GENMASK(25, 24)
+#define HNS3_RSS_TUPLE_IPV4_FLAG_M	GENMASK(27, 26)
+#define HNS3_RSS_TUPLE_IPV6_TCP_M	GENMASK(35, 32)
+#define HNS3_RSS_TUPLE_IPV6_UDP_M	GENMASK(43, 40)
+#define HNS3_RSS_TUPLE_IPV6_SCTP_M	GENMASK(52, 48)
+#define HNS3_RSS_TUPLE_IPV6_NONF_M	GENMASK(57, 56)
+#define HNS3_RSS_TUPLE_IPV6_FLAG_M	GENMASK(59, 58)
 
 #define HNS3_RSS_IND_TBL_SIZE	512 /* The size of hash lookup table */
 #define HNS3_RSS_IND_TBL_SIZE_MAX 2048
 #define HNS3_RSS_KEY_SIZE	40
+#define HNS3_RSS_KEY_SIZE_MAX	128
 #define HNS3_RSS_SET_BITMAP_MSK	0xffff
 
 #define HNS3_RSS_HASH_ALGO_TOEPLITZ	0
@@ -40,15 +120,13 @@
 	uint64_t rss_tuple_fields;
 };
 
-#define HNS3_RSS_QUEUES_BUFFER_NUM	64 /* Same as the Max rx/tx queue num */
+/* Same as the Max queue num under TC */
+#define HNS3_RSS_QUEUES_BUFFER_NUM	512
 struct hns3_rss_conf {
-	/* RSS parameters :algorithm, flow_types,  key, queue */
-	struct rte_flow_action_rss conf;
+	uint64_t rss_hf;
 	uint8_t hash_algo; /* hash function type defined by hardware */
-	uint8_t key[HNS3_RSS_KEY_SIZE];  /* Hash key */
+	uint8_t key[HNS3_RSS_KEY_SIZE_MAX];  /* Hash key */
 	uint16_t rss_indirection_tbl[HNS3_RSS_IND_TBL_SIZE_MAX];
-	uint16_t queue[HNS3_RSS_QUEUES_BUFFER_NUM]; /* Queues indices to use */
-	bool valid; /* check if RSS rule is valid */
 	/*
 	 * For IPv6 SCTP packets type, check whether the NIC hardware support
 	 * RSS hash using the src/dst port as the input tuple. For Kunpeng920
@@ -112,8 +190,17 @@
 int hns3_rss_reset_indir_table(struct hns3_hw *hw);
 int hns3_config_rss(struct hns3_adapter *hns);
 void hns3_rss_uninit(struct hns3_adapter *hns);
+bool hns3_check_rss_types_valid(struct hns3_hw *hw, uint64_t types);
 int hns3_set_rss_tuple_by_rss_hf(struct hns3_hw *hw, uint64_t rss_hf);
-int hns3_set_rss_algo_key(struct hns3_hw *hw, const uint8_t *key);
+int hns3_set_rss_tuple_field(struct hns3_hw *hw, uint64_t tuple_fields);
+int hns3_get_rss_tuple_field(struct hns3_hw *hw, uint64_t *tuple_fields);
+int hns3_set_rss_algo_key(struct hns3_hw *hw, uint8_t hash_algo,
+			  const uint8_t *key, uint8_t key_len);
 int hns3_restore_filter(struct hns3_adapter *hns);
+int hns3_rss_get_algo_key(struct hns3_hw *hw,  uint8_t *hash_algo,
+			  uint8_t *key, uint8_t key_len);
+uint64_t hns3_rss_calc_tuple_filed(uint64_t rss_hf);
+int hns3_update_rss_algo_key(struct hns3_hw *hw, uint8_t hash_algo,
+			     uint8_t *key, uint8_t key_len);
 
 #endif /* _HNS3_RSS_H_ */
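
[Note for reviewers] The HNS3_RSS_PCTYPE_*/HNS3_RSS_TUPLE_*_M constants introduced above describe a single 64-bit tuple-enable register: each packet classification type owns a byte-aligned group of bits, BIT_ULL() names the group's base bit and GENMASK() spans its whole field. A small sketch of how such masks are built and used; the helper macros below mirror the kernel-style definitions DPDK relies on:

#include <stdint.h>
#include <stdio.h>

/* Kernel-style helpers, spelled out for the sketch (64-bit). */
#define BIT_ULL(n)	(1ULL << (n))
#define GENMASK(h, l)	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

/* IPv4/TCP owns bits 3..0, IPv4/UDP bits 11..8, as in the header. */
#define PCTYPE_IPV4_TCP		BIT_ULL(0)
#define TUPLE_IPV4_TCP_M	GENMASK(3, 0)
#define PCTYPE_IPV4_UDP		BIT_ULL(8)
#define TUPLE_IPV4_UDP_M	GENMASK(11, 8)

int main(void)
{
	uint64_t fields = 0;

	/* Enable all four IPv4/TCP tuple inputs, none for IPv4/UDP. */
	fields |= TUPLE_IPV4_TCP_M;

	printf("ipv4-tcp enabled: %s\n",
	       (fields & TUPLE_IPV4_TCP_M) ? "yes" : "no");
	printf("ipv4-udp enabled: %s\n",
	       (fields & TUPLE_IPV4_UDP_M) ? "yes" : "no");

	/* Clear only the IPv4/TCP group, leaving other fields intact. */
	fields &= ~TUPLE_IPV4_TCP_M;
	return 0;
}
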
diff -Nru dpdk-20.11.7/drivers/net/hns3/hns3_rxtx.c dpdk-20.11.8/drivers/net/hns3/hns3_rxtx.c
--- dpdk-20.11.7/drivers/net/hns3/hns3_rxtx.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/net/hns3/hns3_rxtx.c	2023-04-27 18:57:22.000000000 +0100
@@ -19,10 +19,16 @@
 #include "hns3_regs.h"
 #include "hns3_logs.h"
 #include "hns3_rxtx.h"
+#include "hns3_mp.h"
 
 #define HNS3_CFG_DESC_NUM(num)	((num) / 8 - 1)
 #define HNS3_RX_RING_PREFETCTH_MASK	3
 
+static uint16_t
+hns3_dummy_rxtx_burst(void *dpdk_txq __rte_unused,
+		      struct rte_mbuf **pkts __rte_unused,
+		      uint16_t pkts_n __rte_unused);
+
 static void
 hns3_rx_queue_release_mbufs(struct hns3_rx_queue *rxq)
 {
@@ -2527,10 +2533,11 @@
 		eth_rx_burst_t pkt_burst;
 		const char *info;
 	} burst_infos[] = {
-		{ hns3_recv_pkts,		"Scalar" },
+		{ hns3_recv_pkts,		"Scalar"           },
 		{ hns3_recv_scattered_pkts,	"Scalar Scattered" },
-		{ hns3_recv_pkts_vec,		"Vector Neon" },
-		{ hns3_recv_pkts_vec_sve,	"Vector Sve" },
+		{ hns3_recv_pkts_vec,		"Vector Neon"      },
+		{ hns3_recv_pkts_vec_sve,	"Vector Sve"       },
+		{ hns3_dummy_rxtx_burst,        "Dummy"            },
 	};
 
 	eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
@@ -3867,24 +3874,31 @@
 hns3_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
 		       struct rte_eth_burst_mode *mode)
 {
-	eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
-	const char *info = NULL;
-
-	if (pkt_burst == hns3_xmit_pkts_simple)
-		info = "Scalar Simple";
-	else if (pkt_burst == hns3_xmit_pkts)
-		info = "Scalar";
-	else if (pkt_burst == hns3_xmit_pkts_vec)
-		info = "Vector Neon";
-	else if (pkt_burst == hns3_xmit_pkts_vec_sve)
-		info = "Vector Sve";
+	static const struct {
+		eth_tx_burst_t pkt_burst;
+		const char *info;
+	} burst_infos[] = {
+		{ hns3_xmit_pkts_simple,	"Scalar Simple" },
+		{ hns3_xmit_pkts,		"Scalar"        },
+		{ hns3_xmit_pkts_vec,		"Vector Neon"   },
+		{ hns3_xmit_pkts_vec_sve,	"Vector Sve"    },
+		{ hns3_dummy_rxtx_burst,	"Dummy"         },
+	};
 
-	if (info == NULL)
-		return -EINVAL;
+	eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
+	int ret = -EINVAL;
+	unsigned int i;
 
-	snprintf(mode->info, sizeof(mode->info), "%s", info);
+	for (i = 0; i < RTE_DIM(burst_infos); i++) {
+		if (pkt_burst == burst_infos[i].pkt_burst) {
+			snprintf(mode->info, sizeof(mode->info), "%s",
+				 burst_infos[i].info);
+			ret = 0;
+			break;
+		}
+	}
 
-	return 0;
+	return ret;
 }
 
 static eth_tx_burst_t
@@ -4117,3 +4131,31 @@
 	else
 		return fbd_num - driver_hold_bd_num;
 }
+
+void
+hns3_stop_rxtx_datapath(struct rte_eth_dev *dev)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	hns3_set_rxtx_function(dev);
+
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+		return;
+
+	rte_wmb();
+	/* Disable datapath on secondary process. */
+	hns3_mp_req_stop_rxtx(dev);
+	/* Prevent crashes when queues are still in use. */
+	rte_delay_ms(hw->cfg_max_queues);
+}
+
+void
+hns3_start_rxtx_datapath(struct rte_eth_dev *dev)
+{
+	hns3_set_rxtx_function(dev);
+
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+		return;
+
+	hns3_mp_req_start_rxtx(dev);
+}
diff -Nru dpdk-20.11.7/drivers/net/hns3/hns3_rxtx.h dpdk-20.11.8/drivers/net/hns3/hns3_rxtx.h
--- dpdk-20.11.7/drivers/net/hns3/hns3_rxtx.h	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/net/hns3/hns3_rxtx.h	2023-04-27 18:57:22.000000000 +0100
@@ -702,5 +702,7 @@
 int hns3_start_all_rxqs(struct rte_eth_dev *dev);
 void hns3_stop_all_txqs(struct rte_eth_dev *dev);
 void hns3_restore_tqp_enable_state(struct hns3_hw *hw);
+void hns3_stop_rxtx_datapath(struct rte_eth_dev *dev);
+void hns3_start_rxtx_datapath(struct rte_eth_dev *dev);
 
 #endif /* _HNS3_RXTX_H_ */
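
[Note for reviewers] hns3_stop_rxtx_datapath()/hns3_start_rxtx_datapath() above package a common PMD trick: swap the burst callbacks to a dummy that returns zero packets (the hns3_dummy_rxtx_burst now listed in the burst-info tables), make the swap visible to other lcores, and wait out any burst calls already in flight before the queues are reset. The core of the idea as a compilable sketch, using C11 atomics in place of the driver's rte_wmb()/multi-process plumbing:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

typedef uint16_t (*rx_burst_t)(void *q, void **pkts, uint16_t n);

static uint16_t real_rx_burst(void *q, void **pkts, uint16_t n)
{
	(void)q; (void)pkts;
	return n;		/* pretend the ring was full */
}

/* Dummy burst: always empty, safe while queues are torn down. */
static uint16_t dummy_rx_burst(void *q, void **pkts, uint16_t n)
{
	(void)q; (void)pkts; (void)n;
	return 0;
}

static _Atomic rx_burst_t rx_burst = real_rx_burst;

static void stop_datapath(void)
{
	/* Reroute new burst calls to the dummy first... */
	atomic_store_explicit(&rx_burst, dummy_rx_burst,
			      memory_order_release);
	/* ...then the driver waits (rte_delay_ms in the hunk) long
	 * enough for bursts already in flight to drain before the
	 * queues are reset. */
}

int main(void)
{
	rx_burst_t f = atomic_load_explicit(&rx_burst, memory_order_acquire);

	printf("before stop: %u pkts\n", (unsigned)f(NULL, NULL, 32));
	stop_datapath();
	f = atomic_load_explicit(&rx_burst, memory_order_acquire);
	printf("after stop:  %u pkts\n", (unsigned)f(NULL, NULL, 32));
	return 0;
}
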
diff -Nru dpdk-20.11.7/drivers/net/i40e/i40e_ethdev.c dpdk-20.11.8/drivers/net/i40e/i40e_ethdev.c
--- dpdk-20.11.7/drivers/net/i40e/i40e_ethdev.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/net/i40e/i40e_ethdev.c	2023-04-27 18:57:22.000000000 +0100
@@ -397,7 +397,6 @@
 				      struct rte_ether_addr *mac_addr);
 
 static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
-static void i40e_set_mac_max_frame(struct rte_eth_dev *dev, uint16_t size);
 
 static int i40e_ethertype_filter_convert(
 	const struct rte_eth_ethertype_filter *input,
@@ -1779,6 +1778,11 @@
 	/* initialize mirror rule list */
 	TAILQ_INIT(&pf->mirror_list);
 
+	/* Set the max frame size to 0x2600 by default,
+	 * in case other drivers changed the default value.
+	 */
+	i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, false, 0, NULL);
+
 	/* initialize RSS rule list */
 	TAILQ_INIT(&pf->rss_config_list);
 
@@ -2430,7 +2434,6 @@
 	uint32_t intr_vector = 0;
 	struct i40e_vsi *vsi;
 	uint16_t nb_rxq, nb_txq;
-	uint16_t max_frame_size;
 
 	hw->adapter_stopped = 0;
 
@@ -2517,10 +2520,21 @@
 		}
 	}
 
+	/* Disable mac loopback mode */
+	if (dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_MODE_NONE) {
+		ret = i40e_aq_set_lb_modes(hw, I40E_AQ_LB_MODE_NONE, NULL);
+		if (ret != I40E_SUCCESS) {
+			PMD_DRV_LOG(ERR, "fail to set loopback link");
+			goto tx_err;
+		}
+	}
+
 	/* Enable mac loopback mode */
-	if (dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_MODE_NONE ||
-	    dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_PHY_LOCAL) {
-		ret = i40e_aq_set_lb_modes(hw, dev->data->dev_conf.lpbk_mode, NULL);
+	if (dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_MODE_EN) {
+		if (hw->mac.type == I40E_MAC_X722)
+			ret = i40e_aq_set_lb_modes(hw, I40E_AQ_LB_MAC_LOCAL_X722, NULL);
+		else
+			ret = i40e_aq_set_lb_modes(hw, I40E_AQ_LB_MAC, NULL);
 		if (ret != I40E_SUCCESS) {
 			PMD_DRV_LOG(ERR, "fail to set loopback link");
 			goto tx_err;
@@ -2572,9 +2586,6 @@
 			    "please call hierarchy_commit() "
 			    "before starting the port");
 
-	max_frame_size = dev->data->mtu + I40E_ETH_OVERHEAD;
-	i40e_set_mac_max_frame(dev, max_frame_size);
-
 	return I40E_SUCCESS;
 
 tx_err:
@@ -2942,9 +2953,6 @@
 	return i40e_phy_conf_link(hw, abilities, speed, false);
 }
 
-#define CHECK_INTERVAL             100  /* 100ms */
-#define MAX_REPEAT_TIME            10  /* 1s (10 * 100ms) in total */
-
 static __rte_always_inline void
 update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link)
 {
@@ -3012,6 +3020,8 @@
 update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link,
 	bool enable_lse, int wait_to_complete)
 {
+#define CHECK_INTERVAL             100  /* 100ms */
+#define MAX_REPEAT_TIME            10  /* 1s (10 * 100ms) in total */
 	uint32_t rep_cnt = MAX_REPEAT_TIME;
 	struct i40e_link_status link_status;
 	int status;
@@ -6814,7 +6824,6 @@
 			if (!ret)
 				rte_eth_dev_callback_process(dev,
 					RTE_ETH_EVENT_INTR_LSC, NULL);
-
 			break;
 		default:
 			PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
@@ -13132,40 +13141,6 @@
 	return 0;
 }
 
-static void
-i40e_set_mac_max_frame(struct rte_eth_dev *dev, uint16_t size)
-{
-	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	uint32_t rep_cnt = MAX_REPEAT_TIME;
-	struct rte_eth_link link;
-	enum i40e_status_code status;
-	bool can_be_set = true;
-
-	/*
-	 * I40E_MEDIA_TYPE_BASET link up can be ignored
-	 * I40E_MEDIA_TYPE_BASET link down that hw->phy.media_type
-	 * is I40E_MEDIA_TYPE_UNKNOWN
-	 */
-	if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET &&
-	    hw->phy.media_type != I40E_MEDIA_TYPE_UNKNOWN) {
-		do {
-			update_link_reg(hw, &link);
-			if (link.link_status)
-				break;
-			rte_delay_ms(CHECK_INTERVAL);
-		} while (--rep_cnt);
-		can_be_set = !!link.link_status;
-	}
-
-	if (can_be_set) {
-		status = i40e_aq_set_mac_config(hw, size, TRUE, 0, false, NULL);
-		if (status != I40E_SUCCESS)
-			PMD_DRV_LOG(ERR, "Failed to set max frame size at port level");
-	} else {
-		PMD_DRV_LOG(ERR, "Set max frame size at port level not applicable on link down");
-	}
-}
-
 RTE_LOG_REGISTER(i40e_logtype_init, pmd.net.i40e.init, NOTICE);
 RTE_LOG_REGISTER(i40e_logtype_driver, pmd.net.i40e.driver, NOTICE);
 #ifdef RTE_LIBRTE_I40E_DEBUG_RX
diff -Nru dpdk-20.11.7/drivers/net/i40e/i40e_ethdev.h dpdk-20.11.8/drivers/net/i40e/i40e_ethdev.h
--- dpdk-20.11.7/drivers/net/i40e/i40e_ethdev.h	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/net/i40e/i40e_ethdev.h	2023-04-27 18:57:22.000000000 +0100
@@ -47,6 +47,9 @@
 #define I40E_MAX_VF               128
 /*flag of no loopback*/
 #define I40E_AQ_LB_MODE_NONE	  0x0
+#define I40E_AQ_LB_MODE_EN	  0x01
+#define I40E_AQ_LB_MAC		  0x01
+#define I40E_AQ_LB_MAC_LOCAL_X722 0x04
 /*
  * vlan_id is a 12 bit number.
  * The VFTA array is actually a 4096 bit array, 128 of 32bit elements.
@@ -1560,7 +1563,7 @@
 	uint16_t interval = 0;
 
 	if (is_multi_drv) {
-		interval = I40E_QUEUE_ITR_INTERVAL_MAX;
+		interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT;
 	} else {
 		if (is_pf)
 			interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT;
diff -Nru dpdk-20.11.7/drivers/net/i40e/i40e_flow.c dpdk-20.11.8/drivers/net/i40e/i40e_flow.c
--- dpdk-20.11.7/drivers/net/i40e/i40e_flow.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/net/i40e/i40e_flow.c	2023-04-27 18:57:22.000000000 +0100
@@ -1992,6 +1992,14 @@
 	}
 
 	/* Not supported */
+	if (attr->transfer) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+				   attr, "Not support transfer.");
+		return -rte_errno;
+	}
+
+	/* Not supported */
 	if (attr->priority) {
 		rte_flow_error_set(error, EINVAL,
 				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
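
[Note for reviewers] This guard, and the identical one added to ice_generic_flow.c further down, rejects flow rules carrying the 'transfer' attribute up front since neither driver honors it. For reference, the generic shape of such an attribute gate against the public rte_flow API (a sketch, not the exact driver code):

#include <errno.h>
#include <rte_flow.h>

/* Reject attributes the device cannot honor before parsing patterns.
 * rte_flow_error_set() returns -rte_errno, the validate convention. */
int
flow_check_attr(const struct rte_flow_attr *attr,
		struct rte_flow_error *error)
{
	if (attr->transfer)
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
				attr, "transfer is not supported");
	if (attr->priority)
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				attr, "priorities are not supported");
	if (!attr->ingress)
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				attr, "only ingress is supported");
	return 0;
}
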
diff -Nru dpdk-20.11.7/drivers/net/iavf/iavf_ethdev.c dpdk-20.11.8/drivers/net/iavf/iavf_ethdev.c
--- dpdk-20.11.7/drivers/net/iavf/iavf_ethdev.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/net/iavf/iavf_ethdev.c	2023-04-27 18:57:22.000000000 +0100
@@ -750,6 +750,9 @@
 
 	PMD_INIT_FUNC_TRACE();
 
+	if (vf->vf_reset)
+		return 0;
+
 	if (adapter->stopped == 1)
 		return 0;
 
diff -Nru dpdk-20.11.7/drivers/net/iavf/iavf_generic_flow.c dpdk-20.11.8/drivers/net/iavf/iavf_generic_flow.c
--- dpdk-20.11.7/drivers/net/iavf/iavf_generic_flow.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/net/iavf/iavf_generic_flow.c	2023-04-27 18:57:22.000000000 +0100
@@ -1246,11 +1246,12 @@
 	}
 
 	flow->engine = engine;
+	rte_spinlock_lock(&vf->flow_ops_lock);
 	TAILQ_INSERT_TAIL(&vf->flow_list, flow, node);
+	rte_spinlock_unlock(&vf->flow_ops_lock);
 	PMD_DRV_LOG(INFO, "Succeeded to create (%d) flow", engine->type);
 
 free_flow:
-	rte_spinlock_unlock(&vf->flow_ops_lock);
 	return flow;
 }
 
diff -Nru dpdk-20.11.7/drivers/net/iavf/iavf.h dpdk-20.11.8/drivers/net/iavf/iavf.h
--- dpdk-20.11.7/drivers/net/iavf/iavf.h	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/net/iavf/iavf.h	2023-04-27 18:57:22.000000000 +0100
@@ -160,6 +160,7 @@
 	struct iavf_qv_map *qv_map; /* queue vector mapping */
 	struct iavf_flow_list flow_list;
 	rte_spinlock_t flow_ops_lock;
+	rte_spinlock_t aq_lock;
 	struct iavf_parser_list rss_parser_list;
 	struct iavf_parser_list dist_parser_list;
 
diff -Nru dpdk-20.11.7/drivers/net/iavf/iavf_vchnl.c dpdk-20.11.8/drivers/net/iavf/iavf_vchnl.c
--- dpdk-20.11.7/drivers/net/iavf/iavf_vchnl.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/net/iavf/iavf_vchnl.c	2023-04-27 18:57:22.000000000 +0100
@@ -387,6 +387,20 @@
 	return err;
 }
 
+static int
+iavf_execute_vf_cmd_safe(struct iavf_adapter *adapter,
+	struct iavf_cmd_info *args)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	int ret;
+
+	rte_spinlock_lock(&vf->aq_lock);
+	ret = iavf_execute_vf_cmd(adapter, args);
+	rte_spinlock_unlock(&vf->aq_lock);
+
+	return ret;
+}
+
 static void
 iavf_handle_pf_event_msg(struct rte_eth_dev *dev, uint8_t *msg,
 			uint16_t msglen)
@@ -504,7 +518,7 @@
 	args.in_args_size = 0;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd_safe(adapter, &args);
 	if (ret)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " OP_ENABLE_VLAN_STRIPPING");
@@ -525,7 +539,7 @@
 	args.in_args_size = 0;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd_safe(adapter, &args);
 	if (ret)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " OP_DISABLE_VLAN_STRIPPING");
@@ -554,7 +568,7 @@
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd_safe(adapter, &args);
 	if (err) {
 		PMD_INIT_LOG(ERR, "Fail to execute command of OP_VERSION");
 		return err;
@@ -609,7 +623,7 @@
 	args.in_args = (uint8_t *)&caps;
 	args.in_args_size = sizeof(caps);
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd_safe(adapter, &args);
 
 	if (err) {
 		PMD_DRV_LOG(ERR,
@@ -654,7 +668,7 @@
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd_safe(adapter, &args);
 	if (ret) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_GET_SUPPORTED_RXDIDS");
@@ -686,7 +700,7 @@
 	args.in_args_size = sizeof(queue_select);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd_safe(adapter, &args);
 	if (err) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_ENABLE_QUEUES");
@@ -714,7 +728,7 @@
 	args.in_args_size = sizeof(queue_select);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd_safe(adapter, &args);
 	if (err) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_DISABLE_QUEUES");
@@ -747,7 +761,7 @@
 	args.in_args_size = sizeof(queue_select);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd_safe(adapter, &args);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of %s",
 			    on ? "OP_ENABLE_QUEUES" : "OP_DISABLE_QUEUES");
@@ -789,7 +803,7 @@
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd_safe(adapter, &args);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_ENABLE_QUEUES_V2");
@@ -833,7 +847,7 @@
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd_safe(adapter, &args);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_DISABLE_QUEUES_V2");
@@ -879,7 +893,7 @@
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd_safe(adapter, &args);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of %s",
 			    on ? "OP_ENABLE_QUEUES_V2" : "OP_DISABLE_QUEUES_V2");
@@ -911,7 +925,7 @@
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd_safe(adapter, &args);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_CONFIG_RSS_LUT");
@@ -943,7 +957,7 @@
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd_safe(adapter, &args);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_CONFIG_RSS_KEY");
@@ -1035,7 +1049,7 @@
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd_safe(adapter, &args);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " VIRTCHNL_OP_CONFIG_VSI_QUEUES");
@@ -1076,7 +1090,7 @@
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd_safe(adapter, &args);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command OP_CONFIG_IRQ_MAP");
 
@@ -1117,7 +1131,7 @@
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd_safe(adapter, &args);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command OP_MAP_QUEUE_VECTOR");
 
@@ -1179,7 +1193,7 @@
 		args.in_args_size = len;
 		args.out_buffer = vf->aq_resp;
 		args.out_size = IAVF_AQ_BUF_SZ;
-		err = iavf_execute_vf_cmd(adapter, &args);
+		err = iavf_execute_vf_cmd_safe(adapter, &args);
 		if (err)
 			PMD_DRV_LOG(ERR, "fail to execute command %s",
 				    add ? "OP_ADD_ETHER_ADDRESS" :
@@ -1206,7 +1220,7 @@
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd_safe(adapter, &args);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command OP_GET_STATS");
 		*pstats = NULL;
@@ -1241,7 +1255,7 @@
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd_safe(adapter, &args);
 
 	if (err) {
 		PMD_DRV_LOG(ERR,
@@ -1281,7 +1295,7 @@
 	args.in_args_size = sizeof(cmd_buffer);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd_safe(adapter, &args);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    add ? "OP_ADD_ETH_ADDR" :  "OP_DEL_ETH_ADDR");
@@ -1308,7 +1322,7 @@
 	args.in_args_size = sizeof(cmd_buffer);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd_safe(adapter, &args);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    add ? "OP_ADD_VLAN" :  "OP_DEL_VLAN");
@@ -1335,7 +1349,7 @@
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd_safe(adapter, &args);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_FDIR_FILTER");
 		return err;
@@ -1395,7 +1409,7 @@
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd_safe(adapter, &args);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_FDIR_FILTER");
 		return err;
@@ -1442,7 +1456,7 @@
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd_safe(adapter, &args);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to check flow director rule");
 		return err;
@@ -1483,7 +1497,7 @@
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd_safe(adapter, &args);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of %s",
@@ -1536,7 +1550,7 @@
 		i * sizeof(struct virtchnl_ether_addr);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd_safe(adapter, &args);
 
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
@@ -1580,10 +1594,14 @@
 	/*
 	 * disable interrupt to avoid the admin queue message to be read
 	 * before iavf_read_msg_from_pf.
+	 *
+	 * don't disable interrupt handler until ready to execute vf cmd.
 	 */
+	rte_spinlock_lock(&vf->aq_lock);
 	rte_intr_disable(&pci_dev->intr_handle);
 	err = iavf_execute_vf_cmd(adapter, &args);
 	rte_intr_enable(&pci_dev->intr_handle);
+	rte_spinlock_unlock(&vf->aq_lock);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command OP_REQUEST_QUEUES");
 		return err;
@@ -1618,7 +1636,7 @@
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd_safe(adapter, &args);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL_OP_GET_MAX_RSS_QREGION");
 		return err;
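
[Note for reviewers] Most of the iavf_vchnl.c delta is mechanical: every virtchnl command now funnels through iavf_execute_vf_cmd_safe(), which brackets the unlocked core with the new aq_lock so concurrent control-path calls cannot interleave admin-queue messages. The one caller that keeps calling the unlocked core directly (OP_REQUEST_QUEUES) takes aq_lock by hand because it must also disable the interrupt handler inside the same critical section. The wrapper pattern reduced to essentials (pthread mutex in place of rte_spinlock):

#include <pthread.h>

static pthread_mutex_t aq_lock = PTHREAD_MUTEX_INITIALIZER;

static int execute_cmd(int op) { (void)op; return 0; /* unlocked core */ }

/* Every ordinary caller goes through the locked wrapper. */
int execute_cmd_safe(int op)
{
	int ret;

	pthread_mutex_lock(&aq_lock);
	ret = execute_cmd(op);
	pthread_mutex_unlock(&aq_lock);
	return ret;
}

/* The special case: extra work must happen inside the same lock. */
int request_queues(int op)
{
	int ret;

	pthread_mutex_lock(&aq_lock);
	/* disable_irq(); -- must not race with the command */
	ret = execute_cmd(op);
	/* enable_irq(); */
	pthread_mutex_unlock(&aq_lock);
	return ret;
}
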
diff -Nru dpdk-20.11.7/drivers/net/ice/ice_generic_flow.c dpdk-20.11.8/drivers/net/ice/ice_generic_flow.c
--- dpdk-20.11.7/drivers/net/ice/ice_generic_flow.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/net/ice/ice_generic_flow.c	2023-04-27 18:57:22.000000000 +0100
@@ -1946,6 +1946,14 @@
 		return -rte_errno;
 	}
 
+	/* Not supported */
+	if (attr->transfer) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+				   attr, "Not support transfer.");
+		return -rte_errno;
+	}
+
 	/* Check pipeline mode support to set classification stage */
 	if (ad->devargs.pipe_mode_support) {
 		if (attr->priority == 0)
diff -Nru dpdk-20.11.7/drivers/net/ipn3ke/ipn3ke_ethdev.c dpdk-20.11.8/drivers/net/ipn3ke/ipn3ke_ethdev.c
--- dpdk-20.11.7/drivers/net/ipn3ke/ipn3ke_ethdev.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/net/ipn3ke/ipn3ke_ethdev.c	2023-04-27 18:57:22.000000000 +0100
@@ -561,7 +561,7 @@
 		snprintf(name, sizeof(name), "net_%s_representor_%d",
 			afu_dev->device.name, i);
 
-		ethdev = rte_eth_dev_allocated(afu_dev->device.name);
+		ethdev = rte_eth_dev_allocated(name);
 		if (ethdev != NULL)
 			rte_eth_dev_destroy(ethdev, ipn3ke_rpst_uninit);
 	}
diff -Nru dpdk-20.11.7/drivers/net/ipn3ke/ipn3ke_representor.c dpdk-20.11.8/drivers/net/ipn3ke/ipn3ke_representor.c
--- dpdk-20.11.7/drivers/net/ipn3ke/ipn3ke_representor.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/net/ipn3ke/ipn3ke_representor.c	2023-04-27 18:57:22.000000000 +0100
@@ -2589,7 +2589,7 @@
 		}
 		rte_delay_us(50 * MS);
 
-		if (num == 0xffffff)
+		if (num == 0 || num == 0xffffff)
 			return NULL;
 	}
 
diff -Nru dpdk-20.11.7/drivers/net/ixgbe/ixgbe_ethdev.c dpdk-20.11.8/drivers/net/ixgbe/ixgbe_ethdev.c
--- dpdk-20.11.7/drivers/net/ixgbe/ixgbe_ethdev.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/net/ixgbe/ixgbe_ethdev.c	2023-04-27 18:57:22.000000000 +0100
@@ -3869,23 +3869,32 @@
 ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
 {
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	u16 eeprom_verh, eeprom_verl;
-	u32 etrack_id;
+	struct ixgbe_nvm_version nvm_ver;
 	int ret;
 
-	ixgbe_read_eeprom(hw, 0x2e, &eeprom_verh);
-	ixgbe_read_eeprom(hw, 0x2d, &eeprom_verl);
+	ixgbe_get_oem_prod_version(hw, &nvm_ver);
+	if (nvm_ver.oem_valid) {
+		snprintf(fw_version, fw_size, "%x.%x.%x",
+			 nvm_ver.oem_major, nvm_ver.oem_minor,
+			 nvm_ver.oem_release);
+		return 0;
+	}
+
+	ixgbe_get_etk_id(hw, &nvm_ver);
+	ixgbe_get_orom_version(hw, &nvm_ver);
 
-	etrack_id = (eeprom_verh << 16) | eeprom_verl;
-	ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id);
+	if (nvm_ver.or_valid) {
+		snprintf(fw_version, fw_size, "0x%08x, %d.%d.%d",
+			 nvm_ver.etk_id, nvm_ver.or_major,
+			 nvm_ver.or_build, nvm_ver.or_patch);
+		return 0;
+	}
+
+	ret = snprintf(fw_version, fw_size, "0x%08x", nvm_ver.etk_id);
 	if (ret < 0)
 		return -EINVAL;
 
-	ret += 1; /* add the size of '\0' */
-	if (fw_size < (size_t)ret)
-		return ret;
-	else
-		return 0;
+	return (fw_size < (size_t)ret++) ? ret : 0;
 }
 
 static int
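
[Note for reviewers] The rewritten ixgbe_fw_version_get() above reports the most specific version data available, in priority order: a valid OEM product version wins, then the etrack ID combined with the option-ROM version, and only as a last resort the bare etrack ID. The fallback chain as a standalone sketch (field names follow the hunk; the values in main() are made up):

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

struct nvm_version {
	bool oem_valid, or_valid;
	uint8_t oem_major, oem_minor;
	uint16_t oem_release;
	uint32_t etk_id;
	uint8_t or_major, or_patch;
	uint16_t or_build;
};

static int fw_version_get(const struct nvm_version *v,
			  char *buf, size_t size)
{
	if (v->oem_valid)			/* most specific first */
		return snprintf(buf, size, "%x.%x.%x",
				v->oem_major, v->oem_minor, v->oem_release);
	if (v->or_valid)			/* etrack + option ROM */
		return snprintf(buf, size, "0x%08x, %d.%d.%d",
				v->etk_id, v->or_major,
				v->or_build, v->or_patch);
	return snprintf(buf, size, "0x%08x", v->etk_id); /* last resort */
}

int main(void)
{
	struct nvm_version v = { .or_valid = true, .etk_id = 0x80000f2a,
				 .or_major = 1, .or_build = 2, .or_patch = 3 };
	char buf[64];

	fw_version_get(&v, buf, sizeof(buf));
	printf("%s\n", buf);
	return 0;
}
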
diff -Nru dpdk-20.11.7/drivers/net/ixgbe/ixgbe_flow.c dpdk-20.11.8/drivers/net/ixgbe/ixgbe_flow.c
--- dpdk-20.11.7/drivers/net/ixgbe/ixgbe_flow.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/net/ixgbe/ixgbe_flow.c	2023-04-27 18:57:22.000000000 +0100
@@ -1918,9 +1918,9 @@
 
 		/* check src addr mask */
 		for (j = 0; j < 16; j++) {
-			if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
-				rule->mask.src_ipv6_mask |= 1 << j;
-			} else if (ipv6_mask->hdr.src_addr[j] != 0) {
+			if (ipv6_mask->hdr.src_addr[j] == 0) {
+				rule->mask.src_ipv6_mask &= ~(1 << j);
+			} else if (ipv6_mask->hdr.src_addr[j] != UINT8_MAX) {
 				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
 				rte_flow_error_set(error, EINVAL,
 					RTE_FLOW_ERROR_TYPE_ITEM,
@@ -1931,9 +1931,9 @@
 
 		/* check dst addr mask */
 		for (j = 0; j < 16; j++) {
-			if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
-				rule->mask.dst_ipv6_mask |= 1 << j;
-			} else if (ipv6_mask->hdr.dst_addr[j] != 0) {
+			if (ipv6_mask->hdr.dst_addr[j] == 0) {
+				rule->mask.dst_ipv6_mask &= ~(1 << j);
+			} else if (ipv6_mask->hdr.dst_addr[j] != UINT8_MAX) {
 				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
 				rte_flow_error_set(error, EINVAL,
 					RTE_FLOW_ERROR_TYPE_ITEM,
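
[Note for reviewers] The fix above inverts the IPv6 address-mask parsing: the hardware can only mask whole bytes, so each of the 16 mask bytes must be all-ones (match) or all-zeros (wildcard), and the driver now clears bits out of a default all-match bitmap for zero bytes instead of setting bits for 0xff bytes. The corrected conversion on its own:

#include <stdint.h>

/*
 * Convert a 16-byte IPv6 mask into a 16-bit per-byte bitmap.
 * Start from "match every byte" and clear bits for wildcarded
 * bytes; partial byte masks are not representable, so reject them.
 * Returns 0 on success, -1 on an unsupported mask.
 */
int ipv6_mask_to_bitmap(const uint8_t mask[16], uint16_t *bitmap)
{
	uint16_t bits = 0xffff;
	int j;

	for (j = 0; j < 16; j++) {
		if (mask[j] == 0)
			bits &= ~(1u << j);	/* wildcard this byte */
		else if (mask[j] != 0xff)
			return -1;		/* partial masks unsupported */
	}
	*bitmap = bits;
	return 0;
}
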
diff -Nru dpdk-20.11.7/drivers/net/mlx5/linux/mlx5_ethdev_os.c dpdk-20.11.8/drivers/net/mlx5/linux/mlx5_ethdev_os.c
--- dpdk-20.11.7/drivers/net/mlx5/linux/mlx5_ethdev_os.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/net/mlx5/linux/mlx5_ethdev_os.c	2023-04-27 18:57:22.000000000 +0100
@@ -1074,7 +1074,8 @@
 mlx5_sysfs_switch_info(unsigned int ifindex, struct mlx5_switch_info *info)
 {
 	char ifname[IF_NAMESIZE];
-	char port_name[IF_NAMESIZE];
+	char *port_name = NULL;
+	size_t port_name_size = 0;
 	FILE *file;
 	struct mlx5_switch_info data = {
 		.master = 0,
@@ -1087,6 +1088,7 @@
 	bool port_switch_id_set = false;
 	bool device_dir = false;
 	char c;
+	ssize_t line_size;
 
 	if (!if_indextoname(ifindex, ifname)) {
 		rte_errno = errno;
@@ -1102,8 +1104,21 @@
 
 	file = fopen(phys_port_name, "rb");
 	if (file != NULL) {
-		if (fgets(port_name, IF_NAMESIZE, file) != NULL)
+		char *tail_nl;
+
+		line_size = getline(&port_name, &port_name_size, file);
+		if (line_size < 0) {
+			fclose(file);
+			rte_errno = errno;
+			return -rte_errno;
+		} else if (line_size > 0) {
+			/* Remove tailing newline character. */
+			tail_nl = strchr(port_name, '\n');
+			if (tail_nl)
+				*tail_nl = '\0';
 			mlx5_translate_port_name(port_name, &data);
+		}
+		free(port_name);
 		fclose(file);
 	}
 	file = fopen(phys_switch_id, "rb");
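
[Note for reviewers] The mlx5 change swaps a fixed IF_NAMESIZE fgets() buffer for getline(), which allocates whatever the sysfs phys_port_name attribute actually needs, and trims the trailing newline before parsing. The same pattern as a standalone helper (a sketch, with error handling trimmed to the essentials):

#define _GNU_SOURCE	/* getline() on older toolchains */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Read a one-line sysfs attribute into a malloc'd, newline-free
 * string. Caller frees. Returns NULL on error. */
char *sysfs_read_line(const char *path)
{
	char *line = NULL, *nl;
	size_t sz = 0;
	FILE *f = fopen(path, "rb");

	if (f == NULL)
		return NULL;
	if (getline(&line, &sz, f) < 0) {
		free(line);	/* getline may allocate even on failure */
		fclose(f);
		return NULL;
	}
	fclose(f);
	nl = strchr(line, '\n');
	if (nl != NULL)
		*nl = '\0';
	return line;
}
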
diff -Nru dpdk-20.11.7/drivers/net/mlx5/mlx5_flow.c dpdk-20.11.8/drivers/net/mlx5/mlx5_flow.c
--- dpdk-20.11.7/drivers/net/mlx5/mlx5_flow.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/net/mlx5/mlx5_flow.c	2023-04-27 18:57:22.000000000 +0100
@@ -4821,13 +4821,14 @@
 	if (!fdb_tx) {
 		/* Prepare the prefix tag action. */
 		set_tag = (void *)(actions_pre + actions_n + 1);
-		ret = mlx5_flow_get_reg_id(dev, MLX5_SAMPLE_ID, 0, error);
 		/* Trust VF/SF on CX5 not supported meter so that the reserved
 		 * metadata regC is REG_NON, back to use application tag
 		 * index 0.
 		 */
-		if (unlikely(ret == REG_NON))
+		if (unlikely(priv->mtr_color_reg == REG_NON))
 			ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, 0, error);
+		else
+			ret = mlx5_flow_get_reg_id(dev, MLX5_SAMPLE_ID, 0, error);
 		if (ret < 0)
 			return ret;
 		set_tag->id = ret;
diff -Nru dpdk-20.11.7/drivers/net/mlx5/mlx5_rxtx.c dpdk-20.11.8/drivers/net/mlx5/mlx5_rxtx.c
--- dpdk-20.11.7/drivers/net/mlx5/mlx5_rxtx.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/net/mlx5/mlx5_rxtx.c	2023-04-27 18:57:22.000000000 +0100
@@ -86,7 +86,8 @@
 
 static __rte_always_inline int
 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
-		 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe);
+		 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe,
+		 uint16_t *skip_cnt, bool mprq);
 
 static __rte_always_inline uint32_t
 rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe);
@@ -784,7 +785,7 @@
 			mlx5_dump_debug_information(name, "MLX5 Error CQ:",
 						    (const void *)((uintptr_t)
 						    txq->cqes),
-						    sizeof(*err_cqe) *
+						    sizeof(struct mlx5_cqe) *
 						    (1 << txq->cqe_n));
 			mlx5_dump_debug_information(name, "MLX5 Error SQ:",
 						    (const void *)((uintptr_t)
@@ -983,10 +984,14 @@
 	return ret;
 }
 
+#define MLX5_ERROR_CQE_MASK 0x40000000
 /* Must be negative. */
-#define MLX5_ERROR_CQE_RET (-1)
+#define MLX5_REGULAR_ERROR_CQE_RET (-5)
+#define MLX5_CRITICAL_ERROR_CQE_RET (-4)
 /* Must not be negative. */
 #define MLX5_RECOVERY_ERROR_RET 0
+#define MLX5_RECOVERY_IGNORE_RET 1
+#define MLX5_RECOVERY_COMPLETED_RET 2
 
 /**
  * Handle a Rx error.
@@ -1000,12 +1005,18 @@
  * @param[in] vec
  *   1 when called from vectorized Rx burst, need to prepare mbufs for the RQ.
  *   0 when called from non-vectorized Rx burst.
+ * @param[in] err_n
+ *   Number of CQEs to check for an error.
  *
  * @return
- *   MLX5_RECOVERY_ERROR_RET in case of recovery error, otherwise the CQE status.
+ *   MLX5_RECOVERY_ERROR_RET in case of recovery error,
+ *   MLX5_RECOVERY_IGNORE_RET in case of non-critical error syndrome,
+ *   MLX5_RECOVERY_COMPLETED_RET in case of recovery is completed,
+ *   otherwise the CQE status after ignored error syndrome or queue reset.
  */
 int
-mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec)
+mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec,
+		   uint16_t err_n, uint16_t *skip_cnt)
 {
 	const uint16_t cqe_n = 1 << rxq->cqe_n;
 	const uint16_t cqe_mask = cqe_n - 1;
@@ -1017,13 +1028,39 @@
 		volatile struct mlx5_cqe *cqe;
 		volatile struct mlx5_err_cqe *err_cqe;
 	} u = {
-		.cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask],
+		.cqe = &(*rxq->cqes)[(rxq->cq_ci - vec) & cqe_mask],
 	};
 	struct mlx5_mp_arg_queue_state_modify sm;
-	int ret;
+	bool critical_syndrome = false;
+	int ret, i;
 
 	switch (rxq->err_state) {
+	case MLX5_RXQ_ERR_STATE_IGNORE:
+		ret = check_cqe(u.cqe, cqe_n, rxq->cq_ci - vec);
+		if (ret != MLX5_CQE_STATUS_ERR) {
+			rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
+			return ret;
+		}
+		/* Fall-through */
 	case MLX5_RXQ_ERR_STATE_NO_ERROR:
+		for (i = 0; i < (int)err_n; i++) {
+			u.cqe = &(*rxq->cqes)[(rxq->cq_ci - vec - i) & cqe_mask];
+			if (MLX5_CQE_OPCODE(u.cqe->op_own) == MLX5_CQE_RESP_ERR) {
+				if (u.err_cqe->syndrome == MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR ||
+				    u.err_cqe->syndrome == MLX5_CQE_SYNDROME_LOCAL_PROT_ERR ||
+				    u.err_cqe->syndrome == MLX5_CQE_SYNDROME_WR_FLUSH_ERR)
+					critical_syndrome = true;
+				break;
+			}
+		}
+		if (!critical_syndrome) {
+			if (rxq->err_state == MLX5_RXQ_ERR_STATE_NO_ERROR) {
+				*skip_cnt = 0;
+				if (i == err_n)
+					rxq->err_state = MLX5_RXQ_ERR_STATE_IGNORE;
+			}
+			return MLX5_RECOVERY_IGNORE_RET;
+		}
 		rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_RESET;
 		/* Fall-through */
 	case MLX5_RXQ_ERR_STATE_NEED_RESET:
@@ -1083,7 +1120,6 @@
 					rxq->elts_ci : rxq->rq_ci;
 				uint32_t elt_idx;
 				struct rte_mbuf **elt;
-				int i;
 				unsigned int n = elts_n - (elts_ci -
 							  rxq->rq_pi);
 
@@ -1116,6 +1152,7 @@
 			}
 			mlx5_rxq_initialize(rxq);
 			rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
+			return MLX5_RECOVERY_COMPLETED_RET;
 		}
 		return ret;
 	default:
@@ -1135,19 +1172,24 @@
  * @param[out] mcqe
  *   Store pointer to mini-CQE if compressed. Otherwise, the pointer is not
  *   written.
- *
+ * @param[out] skip_cnt
+ *   Number of packets skipped due to recoverable errors.
+ * @param mprq
+ *   Indication if it is called from MPRQ.
  * @return
- *   0 in case of empty CQE, MLX5_ERROR_CQE_RET in case of error CQE,
- *   otherwise the packet size in regular RxQ, and striding byte
- *   count format in mprq case.
+ *   0 in case of empty CQE, MLX5_REGULAR_ERROR_CQE_RET in case of error CQE,
+ *   MLX5_CRITICAL_ERROR_CQE_RET in case of error CQE lead to Rx queue reset,
+ *   otherwise the packet size in regular RxQ,
+ *   and striding byte count format in mprq case.
  */
 static inline int
 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
-		 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe)
+		 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe,
+		 uint16_t *skip_cnt, bool mprq)
 {
 	struct rxq_zip *zip = &rxq->zip;
 	uint16_t cqe_n = cqe_cnt + 1;
-	int len;
+	int len = 0, ret = 0;
 	uint16_t idx, end;
 
 	do {
@@ -1196,7 +1238,6 @@
 		 * compressed.
 		 */
 		} else {
-			int ret;
 			int8_t op_own;
 			uint32_t cq_ci;
 
@@ -1204,10 +1245,12 @@
 			if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
 				if (unlikely(ret == MLX5_CQE_STATUS_ERR ||
 					     rxq->err_state)) {
-					ret = mlx5_rx_err_handle(rxq, 0);
-					if (ret == MLX5_CQE_STATUS_HW_OWN ||
-					    ret == MLX5_RECOVERY_ERROR_RET)
-						return MLX5_ERROR_CQE_RET;
+					ret = mlx5_rx_err_handle(rxq, 0, 1, skip_cnt);
+					if (ret == MLX5_CQE_STATUS_HW_OWN)
+						return MLX5_ERROR_CQE_MASK;
+					if (ret == MLX5_RECOVERY_ERROR_RET ||
+						ret == MLX5_RECOVERY_COMPLETED_RET)
+						return MLX5_CRITICAL_ERROR_CQE_RET;
 				} else {
 					return 0;
 				}
@@ -1260,8 +1303,15 @@
 			}
 		}
 		if (unlikely(rxq->err_state)) {
+			if (rxq->err_state == MLX5_RXQ_ERR_STATE_IGNORE &&
+			    ret == MLX5_CQE_STATUS_SW_OWN) {
+				rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
+				return len & MLX5_ERROR_CQE_MASK;
+			}
 			cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
 			++rxq->stats.idropped;
+			(*skip_cnt) += mprq ? (len & MLX5_MPRQ_STRIDE_NUM_MASK) >>
+				MLX5_MPRQ_STRIDE_NUM_SHIFT : 1;
 		} else {
 			return len;
 		}
@@ -1412,6 +1462,7 @@
 	int len = 0; /* keep its value across iterations. */
 
 	while (pkts_n) {
+		uint16_t skip_cnt;
 		unsigned int idx = rq_ci & wqe_cnt;
 		volatile struct mlx5_wqe_data_seg *wqe =
 			&((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[idx];
@@ -1450,11 +1501,24 @@
 		}
 		if (!pkt) {
 			cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
-			len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &mcqe);
-			if (len <= 0) {
-				rte_mbuf_raw_free(rep);
-				if (unlikely(len == MLX5_ERROR_CQE_RET))
+			len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &mcqe, &skip_cnt, false);
+			if (unlikely(len & MLX5_ERROR_CQE_MASK)) {
+				if (len == MLX5_CRITICAL_ERROR_CQE_RET) {
+					rte_mbuf_raw_free(rep);
 					rq_ci = rxq->rq_ci << sges_n;
+					break;
+				}
+				rq_ci >>= sges_n;
+				rq_ci += skip_cnt;
+				rq_ci <<= sges_n;
+				idx = rq_ci & wqe_cnt;
+				wqe = &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[idx];
+				seg = (*rxq->elts)[idx];
+				cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
+				len = len & ~MLX5_ERROR_CQE_MASK;
+			}
+			if (len == 0) {
+				rte_mbuf_raw_free(rep);
 				break;
 			}
 			pkt = seg;
@@ -1678,6 +1742,7 @@
 		uint16_t strd_cnt;
 		uint16_t strd_idx;
 		uint32_t byte_cnt;
+		uint16_t skip_cnt;
 		volatile struct mlx5_mini_cqe8 *mcqe = NULL;
 		enum mlx5_rqx_code rxq_code;
 
@@ -1690,14 +1755,26 @@
 			buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
 		}
 		cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
-		ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &mcqe);
+		ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &mcqe, &skip_cnt, true);
+		if (unlikely(ret & MLX5_ERROR_CQE_MASK)) {
+			if (ret == MLX5_CRITICAL_ERROR_CQE_RET) {
+				rq_ci = rxq->rq_ci;
+				consumed_strd = rxq->consumed_strd;
+				break;
+			}
+			consumed_strd += skip_cnt;
+			while (consumed_strd >= strd_n) {
+				/* Replace WQE if the buffer is still in use. */
+				mprq_buf_replace(rxq, rq_ci & wq_mask);
+				/* Advance to the next WQE. */
+				consumed_strd -= strd_n;
+				++rq_ci;
+				buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
+			}
+			cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
+		}
 		if (ret == 0)
 			break;
-		if (unlikely(ret == MLX5_ERROR_CQE_RET)) {
-			rq_ci = rxq->rq_ci;
-			consumed_strd = rxq->consumed_strd;
-			break;
-		}
 		byte_cnt = ret;
 		len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
 		MLX5_ASSERT((int)len >= (rxq->crc_present << 2));
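
[Note for reviewers] The mlx5_rxtx.c rework above splits Rx completion errors into two classes: local-QP-operation, local-protection and WR-flush syndromes are critical and force a queue reset, while everything else is counted, skipped via the new skip_cnt bookkeeping, and the queue keeps running. The classification step in isolation (the syndrome names follow the hunk, but treat the numeric values here as illustrative):

#include <stdint.h>

/* Syndrome codes as used by the driver (values illustrative here). */
#define SYND_LOCAL_QP_OP_ERR	0x02
#define SYND_LOCAL_PROT_ERR	0x04
#define SYND_WR_FLUSH_ERR	0x05

enum err_action {
	ERR_IGNORE,	/* drop the packet, bump a counter, keep going */
	ERR_RESET,	/* queue state is gone; full RQ reset required */
};

enum err_action classify_cqe_error(uint8_t syndrome)
{
	switch (syndrome) {
	case SYND_LOCAL_QP_OP_ERR:
	case SYND_LOCAL_PROT_ERR:
	case SYND_WR_FLUSH_ERR:
		return ERR_RESET;	/* critical: recovery path */
	default:
		return ERR_IGNORE;	/* non-critical: skip + count */
	}
}
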
diff -Nru dpdk-20.11.7/drivers/net/mlx5/mlx5_rxtx.h dpdk-20.11.8/drivers/net/mlx5/mlx5_rxtx.h
--- dpdk-20.11.7/drivers/net/mlx5/mlx5_rxtx.h	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/net/mlx5/mlx5_rxtx.h	2023-04-27 18:57:22.000000000 +0100
@@ -91,6 +91,7 @@
 	MLX5_RXQ_ERR_STATE_NO_ERROR = 0,
 	MLX5_RXQ_ERR_STATE_NEED_RESET,
 	MLX5_RXQ_ERR_STATE_NEED_READY,
+	MLX5_RXQ_ERR_STATE_IGNORE,
 };
 
 enum mlx5_rqx_code {
@@ -427,7 +428,8 @@
 void mlx5_set_swp_types_table(void);
 uint16_t mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n);
 void mlx5_rxq_initialize(struct mlx5_rxq_data *rxq);
-__rte_noinline int mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec);
+__rte_noinline int mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec,
+				      uint16_t err_n, uint16_t *skip_cnt);
 void mlx5_mprq_buf_free_cb(void *addr, void *opaque);
 void mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf);
 uint16_t mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts,
diff -Nru dpdk-20.11.7/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h dpdk-20.11.8/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h
--- dpdk-20.11.7/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h	2023-04-27 18:57:22.000000000 +0100
@@ -783,7 +783,7 @@
 {
 	const uint16_t q_n = 1 << rxq->cqe_n;
 	const uint16_t q_mask = q_n - 1;
-	unsigned int pos;
+	unsigned int pos, adj;
 	uint64_t n = 0;
 	uint64_t comp_idx = MLX5_VPMD_DESCS_PER_LOOP;
 	uint16_t nocmp_n = 0;
@@ -866,7 +866,7 @@
 		vector unsigned char pkt_mb0, pkt_mb1, pkt_mb2, pkt_mb3;
 		vector unsigned char op_own, op_own_tmp1, op_own_tmp2;
 		vector unsigned char opcode, owner_mask, invalid_mask;
-		vector unsigned char comp_mask;
+		vector unsigned char comp_mask, mini_mask;
 		vector unsigned char mask;
 #ifdef MLX5_PMD_SOFT_COUNTERS
 		const vector unsigned char lower_half = {
@@ -1174,6 +1174,16 @@
 			(vector unsigned long)mask);
 
 		/* D.3 check error in opcode. */
+		adj = (comp_idx != MLX5_VPMD_DESCS_PER_LOOP && comp_idx == n);
+		mask = (vector unsigned char)(vector unsigned long){
+			(adj * sizeof(uint16_t) * 8), 0};
+		lshift = vec_splat((vector unsigned long)mask, 0);
+		shmask = vec_cmpgt(shmax, lshift);
+		mini_mask = (vector unsigned char)
+			vec_sl((vector unsigned long)invalid_mask, lshift);
+		mini_mask = (vector unsigned char)
+			vec_sel((vector unsigned long)shmask,
+			(vector unsigned long)mini_mask, shmask);
 		opcode = (vector unsigned char)
 			vec_cmpeq((vector unsigned int)resp_err_check,
 			(vector unsigned int)opcode);
@@ -1182,7 +1192,7 @@
 			(vector unsigned int)zero);
 		opcode = (vector unsigned char)
 			vec_andc((vector unsigned long)opcode,
-			(vector unsigned long)invalid_mask);
+			(vector unsigned long)mini_mask);
 
 		/* D.4 mark if any error is set */
 		*err |= ((vector unsigned long)opcode)[0];
diff -Nru dpdk-20.11.7/drivers/net/mlx5/mlx5_rxtx_vec.c dpdk-20.11.8/drivers/net/mlx5/mlx5_rxtx_vec.c
--- dpdk-20.11.7/drivers/net/mlx5/mlx5_rxtx_vec.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/net/mlx5/mlx5_rxtx_vec.c	2023-04-27 18:57:22.000000000 +0100
@@ -50,6 +50,7 @@
 			 uint16_t pkts_n)
 {
 	uint16_t n = 0;
+	uint16_t skip_cnt;
 	unsigned int i;
 #ifdef MLX5_PMD_SOFT_COUNTERS
 	uint32_t err_bytes = 0;
@@ -73,7 +74,7 @@
 	rxq->stats.ipackets -= (pkts_n - n);
 	rxq->stats.ibytes -= err_bytes;
 #endif
-	mlx5_rx_err_handle(rxq, 1);
+	mlx5_rx_err_handle(rxq, 1, pkts_n, &skip_cnt);
 	return n;
 }
 
@@ -247,8 +248,6 @@
 	}
 	rxq->rq_pi += i;
 	rxq->cq_ci += i;
-	rte_io_wmb();
-	*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
 	if (rq_ci != rxq->rq_ci) {
 		rxq->rq_ci = rq_ci;
 		rte_io_wmb();
@@ -355,8 +354,6 @@
 			rxq->decompressed -= n;
 		}
 	}
-	rte_io_wmb();
-	*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
 	*no_cq = !rcvd_pkt;
 	return rcvd_pkt;
 }
@@ -384,6 +381,7 @@
 	bool no_cq = false;
 
 	do {
+		err = 0;
 		nb_rx = rxq_burst_v(rxq, pkts + tn, pkts_n - tn,
 				    &err, &no_cq);
 		if (unlikely(err | rxq->err_state))
@@ -391,6 +389,8 @@
 		tn += nb_rx;
 		if (unlikely(no_cq))
 			break;
+		rte_io_wmb();
+		*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
 	} while (tn != pkts_n);
 	return tn;
 }
@@ -518,6 +518,7 @@
 	bool no_cq = false;
 
 	do {
+		err = 0;
 		nb_rx = rxq_burst_mprq_v(rxq, pkts + tn, pkts_n - tn,
 					 &err, &no_cq);
 		if (unlikely(err | rxq->err_state))
@@ -525,6 +526,8 @@
 		tn += nb_rx;
 		if (unlikely(no_cq))
 			break;
+		rte_io_wmb();
+		*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
 	} while (tn != pkts_n);
 	return tn;
 }
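
[Note for reviewers] A quieter part of this hunk: the CQ doorbell write moves out of the per-burst helpers into the outer loops above, so the consumer index is published to hardware once per successful batch, always behind an I/O write barrier. The ordering requirement boiled down (a sketch; the C11 fence stands in for rte_io_wmb() and the bswap for rte_cpu_to_be_32()):

#include <stdatomic.h>
#include <stdint.h>

static uint32_t db_rec;				/* backing store for the sketch */
static volatile uint32_t *cq_db = &db_rec;	/* doorbell record shared with HW */

void ring_cq_doorbell(uint32_t cq_ci)
{
	/* Make all CQE reads/frees visible before publishing cq_ci. */
	atomic_thread_fence(memory_order_release);
	*cq_db = __builtin_bswap32(cq_ci);	/* device expects big-endian */
}
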
diff -Nru dpdk-20.11.7/drivers/net/mlx5/mlx5_rxtx_vec_neon.h dpdk-20.11.8/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
--- dpdk-20.11.7/drivers/net/mlx5/mlx5_rxtx_vec_neon.h	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/net/mlx5/mlx5_rxtx_vec_neon.h	2023-04-27 18:57:22.000000000 +0100
@@ -524,7 +524,7 @@
 {
 	const uint16_t q_n = 1 << rxq->cqe_n;
 	const uint16_t q_mask = q_n - 1;
-	unsigned int pos;
+	unsigned int pos, adj;
 	uint64_t n = 0;
 	uint64_t comp_idx = MLX5_VPMD_DESCS_PER_LOOP;
 	uint16_t nocmp_n = 0;
@@ -616,7 +616,7 @@
 	     pos += MLX5_VPMD_DESCS_PER_LOOP) {
 		uint16x4_t op_own;
 		uint16x4_t opcode, owner_mask, invalid_mask;
-		uint16x4_t comp_mask;
+		uint16x4_t comp_mask, mini_mask;
 		uint16x4_t mask;
 		uint16x4_t byte_cnt;
 		uint32x4_t ptype_info, flow_tag;
@@ -780,8 +780,12 @@
 				   -1UL >> (n * sizeof(uint16_t) * 8) : 0);
 		invalid_mask = vorr_u16(invalid_mask, mask);
 		/* D.3 check error in opcode. */
+		adj = (comp_idx != MLX5_VPMD_DESCS_PER_LOOP && comp_idx == n);
+		mask = vcreate_u16(adj ?
+			   -1UL >> ((n + 1) * sizeof(uint16_t) * 8) : -1UL);
+		mini_mask = vand_u16(invalid_mask, mask);
 		opcode = vceq_u16(resp_err_check, opcode);
-		opcode = vbic_u16(opcode, invalid_mask);
+		opcode = vbic_u16(opcode, mini_mask);
 		/* D.4 mark if any error is set */
 		*err |= vget_lane_u64(vreinterpret_u64_u16(opcode), 0);
 		/* C.4 fill in mbuf - rearm_data and packet_type. */
diff -Nru dpdk-20.11.7/drivers/net/mlx5/mlx5_rxtx_vec_sse.h dpdk-20.11.8/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
--- dpdk-20.11.7/drivers/net/mlx5/mlx5_rxtx_vec_sse.h	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/net/mlx5/mlx5_rxtx_vec_sse.h	2023-04-27 18:57:22.000000000 +0100
@@ -523,7 +523,7 @@
 {
 	const uint16_t q_n = 1 << rxq->cqe_n;
 	const uint16_t q_mask = q_n - 1;
-	unsigned int pos;
+	unsigned int pos, adj;
 	uint64_t n = 0;
 	uint64_t comp_idx = MLX5_VPMD_DESCS_PER_LOOP;
 	uint16_t nocmp_n = 0;
@@ -591,7 +591,7 @@
 		__m128i pkt_mb0, pkt_mb1, pkt_mb2, pkt_mb3;
 		__m128i op_own, op_own_tmp1, op_own_tmp2;
 		__m128i opcode, owner_mask, invalid_mask;
-		__m128i comp_mask;
+		__m128i comp_mask, mini_mask;
 		__m128i mask;
 #ifdef MLX5_PMD_SOFT_COUNTERS
 		__m128i byte_cnt;
@@ -729,9 +729,12 @@
 		mask = _mm_sll_epi64(ones, mask);
 		invalid_mask = _mm_or_si128(invalid_mask, mask);
 		/* D.3 check error in opcode. */
+		adj = (comp_idx != MLX5_VPMD_DESCS_PER_LOOP && comp_idx == n);
+		mask = _mm_set_epi64x(0, adj * sizeof(uint16_t) * 8);
+		mini_mask = _mm_sll_epi64(invalid_mask, mask);
 		opcode = _mm_cmpeq_epi32(resp_err_check, opcode);
 		opcode = _mm_packs_epi32(opcode, zero);
-		opcode = _mm_andnot_si128(invalid_mask, opcode);
+		opcode = _mm_andnot_si128(mini_mask, opcode);
 		/* D.4 mark if any error is set */
 		*err |= _mm_cvtsi128_si64(opcode);
 		/* D.5 fill in mbuf - rearm_data and packet_type. */
diff -Nru dpdk-20.11.7/drivers/net/mlx5/mlx5_stats.c dpdk-20.11.8/drivers/net/mlx5/mlx5_stats.c
--- dpdk-20.11.7/drivers/net/mlx5/mlx5_stats.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/net/mlx5/mlx5_stats.c	2023-04-27 18:57:22.000000000 +0100
@@ -285,10 +285,9 @@
 
 	if (n >= mlx5_xstats_n && xstats_names) {
 		for (i = 0; i != mlx5_xstats_n; ++i) {
-			strncpy(xstats_names[i].name,
+			strlcpy(xstats_names[i].name,
 				xstats_ctrl->info[i].dpdk_name,
 				RTE_ETH_XSTATS_NAME_SIZE);
-			xstats_names[i].name[RTE_ETH_XSTATS_NAME_SIZE - 1] = 0;
 		}
 	}
 	mlx5_xstats_n = mlx5_txpp_xstats_get_names(dev, xstats_names,
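
[Note for reviewers] This hunk, and the matching one in mlx5_txpp.c below, replaces strncpy() plus a manual terminator with strlcpy(), which always NUL-terminates (truncating if necessary) and so makes the explicit `name[SIZE - 1] = 0` line redundant. Its semantics in miniature:

#include <string.h>

/* Minimal strlcpy: copy up to size-1 bytes, always NUL-terminate,
 * return the length of src so callers can detect truncation. */
size_t my_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size != 0) {
		size_t n = (len >= size) ? size - 1 : len;

		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return len;
}
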
diff -Nru dpdk-20.11.7/drivers/net/mlx5/mlx5_trigger.c dpdk-20.11.8/drivers/net/mlx5/mlx5_trigger.c
--- dpdk-20.11.7/drivers/net/mlx5/mlx5_trigger.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/net/mlx5/mlx5_trigger.c	2023-04-27 18:57:22.000000000 +0100
@@ -871,11 +871,11 @@
 		}
 		/* Indeed, only the first used queue needs to be checked. */
 		if (txq_ctrl->hairpin_conf.manual_bind == 0) {
+			mlx5_txq_release(dev, i);
 			if (cur_port != rx_port) {
 				rte_errno = EINVAL;
 				DRV_LOG(ERR, "port %u and port %u are in"
 					" auto-bind mode", cur_port, rx_port);
-				mlx5_txq_release(dev, i);
 				return -rte_errno;
 			} else {
 				return 0;
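
[Note for reviewers] The mlx5_trigger.c fix above closes a reference leak: mlx5_txq_release() was called only on the error branch, so the success return leaked the queue reference taken earlier in the loop. Hoisting the release above the branch covers both exits; the shape of the fix:

/* Before: the reference was dropped only on the error path.
 * After:  the release is hoisted so every exit drops it. */
struct obj { int refcnt; };

static void obj_release(struct obj *o) { o->refcnt--; }

int check_and_release(struct obj *o, int cur_port, int rx_port)
{
	obj_release(o);			/* done with 'o' either way */
	if (cur_port != rx_port)
		return -1;		/* error: ports mismatch */
	return 0;			/* success: no leak anymore */
}
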
diff -Nru dpdk-20.11.7/drivers/net/mlx5/mlx5_txpp.c dpdk-20.11.8/drivers/net/mlx5/mlx5_txpp.c
--- dpdk-20.11.7/drivers/net/mlx5/mlx5_txpp.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/net/mlx5/mlx5_txpp.c	2023-04-27 18:57:22.000000000 +0100
@@ -1222,11 +1222,9 @@
 
 	if (n >= n_used + n_txpp && xstats_names) {
 		for (i = 0; i < n_txpp; ++i) {
-			strncpy(xstats_names[i + n_used].name,
+			strlcpy(xstats_names[i + n_used].name,
 				mlx5_txpp_stat_names[i],
 				RTE_ETH_XSTATS_NAME_SIZE);
-			xstats_names[i + n_used].name
-					[RTE_ETH_XSTATS_NAME_SIZE - 1] = 0;
 		}
 	}
 	return n_used + n_txpp;
diff -Nru dpdk-20.11.7/drivers/net/nfp/nfp_net.c dpdk-20.11.8/drivers/net/nfp/nfp_net.c
--- dpdk-20.11.7/drivers/net/nfp/nfp_net.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/net/nfp/nfp_net.c	2023-04-27 18:57:22.000000000 +0100
@@ -95,6 +95,8 @@
 static int nfp_set_mac_addr(struct rte_eth_dev *dev,
 			     struct rte_ether_addr *mac_addr);
 
+#define DEFAULT_FLBUF_SIZE        9216
+
 /* The offset of the queue controller queues in the PCIe Target */
 #define NFP_PCIE_QUEUE(_q) (0x80000 + (NFP_QCP_QUEUE_ADDR_SZ * ((_q) & 0xff)))
 
@@ -2652,7 +2654,7 @@
 	cfg_rss_ctrl = nn_cfg_readl(hw, NFP_NET_CFG_RSS_CTRL);
 
 	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP;
+		rss_hf |= ETH_RSS_IPV4;
 
 	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_TCP)
 		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
@@ -2667,7 +2669,7 @@
 		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
 
 	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP;
+		rss_hf |= ETH_RSS_IPV6;
 
 	/* Propagate current RSS hash functions to caller */
 	rss_conf->rss_hf = rss_hf;
@@ -2942,6 +2944,7 @@
 	hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
 	hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU);
 	hw->mtu = RTE_ETHER_MTU;
+	hw->flbufsz = DEFAULT_FLBUF_SIZE;
 
 	/* VLAN insertion is incompatible with LSOv2 */
 	if (hw->cap & NFP_NET_CFG_CTRL_LSO2)
@@ -3538,7 +3541,7 @@
 
 	/* Then try the PCI name */
 	snprintf(fw_name, sizeof(fw_name), "%s/pci-%s.nffw", DEFAULT_FW_PATH,
-			dev->device.name);
+			dev->name);
 
 	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
 	fw_f = open(fw_name, O_RDONLY);
diff -Nru dpdk-20.11.7/drivers/net/sfc/sfc_mae.c dpdk-20.11.8/drivers/net/sfc/sfc_mae.c
--- dpdk-20.11.7/drivers/net/sfc/sfc_mae.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/net/sfc/sfc_mae.c	2023-04-27 18:57:22.000000000 +0100
@@ -2136,6 +2136,21 @@
 	if (rc != 0)
 		goto fail_rule_parse_action;
 
+	/*
+	 * A DPDK flow entry must specify a fate action, which the parser
+	 * converts into a DELIVER action in a libefx action set. An
+	 * attempt to replace the action in the action set should
+	 * fail. If it succeeds then report an error, as the
+	 * parsed flow entry did not contain a fate action.
+	 */
+	rc = efx_mae_action_set_populate_drop(spec);
+	if (rc == 0) {
+		rc = rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					"no fate action found");
+		goto fail_check_fate_action;
+	}
+
 	*action_setp = sfc_mae_action_set_attach(sa, spec);
 	if (*action_setp != NULL) {
 		efx_mae_action_set_spec_fini(sa->nic, spec);
@@ -2149,6 +2164,7 @@
 	return 0;
 
 fail_action_set_add:
+fail_check_fate_action:
 fail_rule_parse_action:
 	efx_mae_action_set_spec_fini(sa->nic, spec);
 
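[Note for reviewers] The sfc_mae.c check is indirect by necessity: libefx has no "is a fate action present?" query, so the driver attempts to populate a DROP action and treats success as proof that no fate action was set (the spec is then discarded on the error path, so the side effect is harmless). The probe-by-insertion pattern with a toy action-set type, not the efx API:

/* An action set accepts exactly one fate action. */
struct action_set { int has_fate; };

static int set_fate_drop(struct action_set *s)
{
	if (s->has_fate)
		return -1;	/* slot already occupied: populate fails */
	s->has_fate = 1;
	return 0;
}

/* Probe: success here means the parsed rule lacked a fate action,
 * which is an error for the caller; failure means all is well.
 * The mutated set is discarded along with the spec on error. */
int check_fate_present(struct action_set *s)
{
	if (set_fate_drop(s) == 0)
		return -1;	/* no fate action was found */
	return 0;
}
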
diff -Nru dpdk-20.11.7/drivers/net/txgbe/base/txgbe_phy.c dpdk-20.11.8/drivers/net/txgbe/base/txgbe_phy.c
--- dpdk-20.11.7/drivers/net/txgbe/base/txgbe_phy.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/net/txgbe/base/txgbe_phy.c	2023-04-27 18:57:22.000000000 +0100
@@ -1491,9 +1491,10 @@
 	wr32_epcs(hw, TXGBE_PHY_MISC_CTL0, 0x4F00);
 
 	value = (0x1804 & ~0x3F3F);
+	value |= 40 << 8;
 	wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0, value);
 
-	value = (0x50 & ~0x7F) | 40 | (1 << 6);
+	value = (0x50 & ~0x7F) | (1 << 6);
 	wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1, value);
 
 	for (i = 0; i < 4; i++) {
@@ -1701,10 +1702,10 @@
 
 	wr32_epcs(hw, TXGBE_PHY_MISC_CTL0, 0x4F00);
 
-	value = (0x1804 & ~0x3F3F) | (24 << 8) | 4;
+	value = (0x1804 & ~0x3F3F) | (24 << 8);
 	wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0, value);
 
-	value = (0x50 & ~0x7F) | 16 | (1 << 6);
+	value = (0x50 & ~0x7F) | (1 << 6);
 	wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1, value);
 
 	for (i = 0; i < 4; i++) {
diff -Nru dpdk-20.11.7/drivers/net/txgbe/txgbe_ethdev.c dpdk-20.11.8/drivers/net/txgbe/txgbe_ethdev.c
--- dpdk-20.11.7/drivers/net/txgbe/txgbe_ethdev.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/net/txgbe/txgbe_ethdev.c	2023-04-27 18:57:22.000000000 +0100
@@ -2682,9 +2682,6 @@
 			intr_handle->type != RTE_INTR_HANDLE_VFIO_MSIX)
 		wr32(hw, TXGBE_PX_INTA, 1);
 
-	/* clear all cause mask */
-	txgbe_disable_intr(hw);
-
 	/* read-on-clear nic registers here */
 	eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
 	PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
@@ -2704,6 +2701,8 @@
 	if (eicr & TXGBE_ICRMISC_GPIO)
 		intr->flags |= TXGBE_FLAG_PHY_INTERRUPT;
 
+	((u32 *)hw->isb_mem)[TXGBE_ISB_MISC] = 0;
+
 	return 0;
 }
 
diff -Nru dpdk-20.11.7/drivers/net/txgbe/txgbe_rxtx.c dpdk-20.11.8/drivers/net/txgbe/txgbe_rxtx.c
--- dpdk-20.11.7/drivers/net/txgbe/txgbe_rxtx.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/net/txgbe/txgbe_rxtx.c	2023-04-27 18:57:22.000000000 +0100
@@ -495,20 +495,21 @@
 	return cmdtype;
 }
 
-static inline uint8_t
-tx_desc_ol_flags_to_ptid(uint64_t oflags, uint32_t ptype)
+static inline uint32_t
+tx_desc_ol_flags_to_ptype(uint64_t oflags)
 {
+	uint32_t ptype;
 	bool tun;
 
-	if (ptype)
-		return txgbe_encode_ptype(ptype);
-
 	/* Only support flags in TXGBE_TX_OFFLOAD_MASK */
 	tun = !!(oflags & PKT_TX_TUNNEL_MASK);
 
 	/* L2 level */
 	ptype = RTE_PTYPE_L2_ETHER;
 	if (oflags & PKT_TX_VLAN)
+		ptype |= (tun ? RTE_PTYPE_INNER_L2_ETHER_VLAN : RTE_PTYPE_L2_ETHER_VLAN);
+
+	if (oflags & PKT_TX_QINQ) /* tunnel + QINQ is not supported */
 		ptype |= RTE_PTYPE_L2_ETHER_VLAN;
 
 	/* L3 level */
@@ -572,6 +573,16 @@
 		break;
 	}
 
+	return ptype;
+}
+
+static inline uint8_t
+tx_desc_ol_flags_to_ptid(uint64_t oflags)
+{
+	uint32_t ptype;
+
+	ptype = tx_desc_ol_flags_to_ptype(oflags);
+
 	return txgbe_encode_ptype(ptype);
 }
 
@@ -731,8 +742,7 @@
 		/* If hardware offload required */
 		tx_ol_req = ol_flags & TXGBE_TX_OFFLOAD_MASK;
 		if (tx_ol_req) {
-			tx_offload.ptid = tx_desc_ol_flags_to_ptid(tx_ol_req,
-					tx_pkt->packet_type);
+			tx_offload.ptid = tx_desc_ol_flags_to_ptid(tx_ol_req);
 			tx_offload.l2_len = tx_pkt->l2_len;
 			tx_offload.l3_len = tx_pkt->l3_len;
 			tx_offload.l4_len = tx_pkt->l4_len;
@@ -4233,7 +4243,7 @@
 		 */
 		buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
 			RTE_PKTMBUF_HEADROOM);
-		buf_size = ROUND_UP(buf_size, 0x1 << 10);
+		buf_size = ROUND_DOWN(buf_size, 0x1 << 10);
 		srrctl |= TXGBE_RXCFG_PKTLEN(buf_size);
 
 		wr32(hw, TXGBE_RXCFG(rxq->reg_idx), srrctl);
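As an aside on the ROUND_UP to ROUND_DOWN change above: rounding the usable buffer size up can advertise more space to the NIC than the mbuf really has. A minimal arithmetic sketch (the 2048/128 mbuf geometry is an assumption for illustration; RTE_ALIGN_CEIL/RTE_ALIGN_FLOOR stand in for the driver's ROUND_UP/ROUND_DOWN macros):

#include <stdio.h>
#include <stdint.h>
#include <rte_common.h>

int main(void)
{
	/* Illustrative mbuf geometry: 2048-byte data room, 128-byte headroom. */
	uint16_t buf_size = 2048 - 128;                      /* 1920 usable */
	uint16_t up = RTE_ALIGN_CEIL(buf_size, 1 << 10);     /* 2048: NIC could overrun */
	uint16_t down = RTE_ALIGN_FLOOR(buf_size, 1 << 10);  /* 1024: always fits */

	printf("usable=%u up=%u down=%u\n", buf_size, up, down);
	return 0;
}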
diff -Nru dpdk-20.11.7/drivers/net/vhost/rte_eth_vhost.c dpdk-20.11.8/drivers/net/vhost/rte_eth_vhost.c
--- dpdk-20.11.7/drivers/net/vhost/rte_eth_vhost.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/net/vhost/rte_eth_vhost.c	2023-04-27 18:57:22.000000000 +0100
@@ -97,8 +97,9 @@
 	uint16_t port;
 	uint16_t virtqueue_id;
 	struct vhost_stats stats;
-	int intr_enable;
 	rte_spinlock_t intr_lock;
+	struct epoll_event ev;
+	int kickfd;
 };
 
 struct pmd_internal {
@@ -525,112 +526,68 @@
 	return list;
 }
 
-static int
+static void
 eth_vhost_update_intr(struct rte_eth_dev *eth_dev, uint16_t rxq_idx)
 {
-	struct rte_intr_handle *handle = eth_dev->intr_handle;
-	struct rte_epoll_event rev;
-	int epfd, ret;
+	struct rte_vhost_vring vring;
+	struct vhost_queue *vq;
 
-	if (!handle)
-		return 0;
+	vq = eth_dev->data->rx_queues[rxq_idx];
+	if (vq == NULL || vq->vid < 0)
+		return;
 
-	if (handle->efds[rxq_idx] == handle->elist[rxq_idx].fd)
-		return 0;
+	if (rte_vhost_get_vhost_vring(vq->vid, (rxq_idx << 1) + 1, &vring) < 0) {
+		VHOST_LOG(DEBUG, "Failed to get rxq-%d's vring, skip!\n", rxq_idx);
+		return;
+	}
 
-	VHOST_LOG(INFO, "kickfd for rxq-%d was changed, updating handler.\n",
-			rxq_idx);
+	rte_spinlock_lock(&vq->intr_lock);
 
-	if (handle->elist[rxq_idx].fd != -1)
-		VHOST_LOG(ERR, "Unexpected previous kickfd value (Got %d, expected -1).\n",
-				handle->elist[rxq_idx].fd);
-
-	/*
-	 * First remove invalid epoll event, and then install
-	 * the new one. May be solved with a proper API in the
-	 * future.
-	 */
-	epfd = handle->elist[rxq_idx].epfd;
-	rev = handle->elist[rxq_idx];
-	ret = rte_epoll_ctl(epfd, EPOLL_CTL_DEL, rev.fd,
-			&handle->elist[rxq_idx]);
-	if (ret) {
-		VHOST_LOG(ERR, "Delete epoll event failed.\n");
-		return ret;
-	}
-
-	rev.fd = handle->efds[rxq_idx];
-	handle->elist[rxq_idx] = rev;
-	ret = rte_epoll_ctl(epfd, EPOLL_CTL_ADD, rev.fd,
-			&handle->elist[rxq_idx]);
-	if (ret) {
-		VHOST_LOG(ERR, "Add epoll event failed.\n");
-		return ret;
+	/* Remove previous kickfd from proxy epoll */
+	if (vq->kickfd >= 0 && vq->kickfd != vring.kickfd) {
+		if (epoll_ctl(vq->ev.data.fd, EPOLL_CTL_DEL, vq->kickfd, &vq->ev) < 0) {
+			VHOST_LOG(DEBUG, "Failed to unregister %d from rxq-%d epoll: %s\n",
+				vq->kickfd, rxq_idx, strerror(errno));
+		} else {
+			VHOST_LOG(DEBUG, "Unregistered %d from rxq-%d epoll\n",
+				vq->kickfd, rxq_idx);
+		}
+		vq->kickfd = -1;
 	}
 
-	return 0;
+	/* Add new one, if valid */
+	if (vq->kickfd != vring.kickfd && vring.kickfd >= 0) {
+		if (epoll_ctl(vq->ev.data.fd, EPOLL_CTL_ADD, vring.kickfd, &vq->ev) < 0) {
+			VHOST_LOG(ERR, "Failed to register %d in rxq-%d epoll: %s\n",
+				vring.kickfd, rxq_idx, strerror(errno));
+		} else {
+			vq->kickfd = vring.kickfd;
+			VHOST_LOG(DEBUG, "Registered %d in rxq-%d epoll\n",
+				vq->kickfd, rxq_idx);
+		}
+	}
+
+	rte_spinlock_unlock(&vq->intr_lock);
 }
 
 static int
 eth_rxq_intr_enable(struct rte_eth_dev *dev, uint16_t qid)
 {
-	struct rte_vhost_vring vring;
-	struct vhost_queue *vq;
-	int old_intr_enable, ret = 0;
+	struct vhost_queue *vq = dev->data->rx_queues[qid];
 
-	vq = dev->data->rx_queues[qid];
-	if (!vq) {
-		VHOST_LOG(ERR, "rxq%d is not setup yet\n", qid);
-		return -1;
-	}
+	if (vq->vid >= 0)
+		rte_vhost_enable_guest_notification(vq->vid, (qid << 1) + 1, 1);
 
-	rte_spinlock_lock(&vq->intr_lock);
-	old_intr_enable = vq->intr_enable;
-	vq->intr_enable = 1;
-	ret = eth_vhost_update_intr(dev, qid);
-	rte_spinlock_unlock(&vq->intr_lock);
-
-	if (ret < 0) {
-		VHOST_LOG(ERR, "Failed to update rxq%d's intr\n", qid);
-		vq->intr_enable = old_intr_enable;
-		return ret;
-	}
-
-	ret = rte_vhost_get_vhost_vring(vq->vid, (qid << 1) + 1, &vring);
-	if (ret < 0) {
-		VHOST_LOG(ERR, "Failed to get rxq%d's vring\n", qid);
-		return ret;
-	}
-	VHOST_LOG(INFO, "Enable interrupt for rxq%d\n", qid);
-	rte_vhost_enable_guest_notification(vq->vid, (qid << 1) + 1, 1);
-	rte_wmb();
-
-	return ret;
+	return 0;
 }
 
 static int
 eth_rxq_intr_disable(struct rte_eth_dev *dev, uint16_t qid)
 {
-	struct rte_vhost_vring vring;
-	struct vhost_queue *vq;
-	int ret = 0;
-
-	vq = dev->data->rx_queues[qid];
-	if (!vq) {
-		VHOST_LOG(ERR, "rxq%d is not setup yet\n", qid);
-		return -1;
-	}
+	struct vhost_queue *vq = dev->data->rx_queues[qid];
 
-	ret = rte_vhost_get_vhost_vring(vq->vid, (qid << 1) + 1, &vring);
-	if (ret < 0) {
-		VHOST_LOG(ERR, "Failed to get rxq%d's vring", qid);
-		return ret;
-	}
-	VHOST_LOG(INFO, "Disable interrupt for rxq%d\n", qid);
-	rte_vhost_enable_guest_notification(vq->vid, (qid << 1) + 1, 0);
-	rte_wmb();
-
-	vq->intr_enable = 0;
+	if (vq->vid >= 0)
+		rte_vhost_enable_guest_notification(vq->vid, (qid << 1) + 1, 0);
 
 	return 0;
 }
@@ -641,70 +598,64 @@
 	struct rte_intr_handle *intr_handle = dev->intr_handle;
 
 	if (intr_handle) {
+		int i;
+
+		for (i = 0; i < dev->data->nb_rx_queues; i++) {
+			int epoll_fd = intr_handle->efds[i];
+
+			if (epoll_fd >= 0)
+				close(epoll_fd);
+		}
 		if (intr_handle->intr_vec)
 			free(intr_handle->intr_vec);
 		free(intr_handle);
 	}
-
 	dev->intr_handle = NULL;
 }
 
 static int
 eth_vhost_install_intr(struct rte_eth_dev *dev)
 {
-	struct rte_vhost_vring vring;
-	struct vhost_queue *vq;
 	int nb_rxq = dev->data->nb_rx_queues;
-	int i;
-	int ret;
+	struct vhost_queue *vq;
 
-	/* uninstall firstly if we are reconnecting */
-	if (dev->intr_handle)
-		eth_vhost_uninstall_intr(dev);
+	int ret;
+	int i;
 
 	dev->intr_handle = malloc(sizeof(*dev->intr_handle));
 	if (!dev->intr_handle) {
 		VHOST_LOG(ERR, "Fail to allocate intr_handle\n");
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto error;
 	}
 	memset(dev->intr_handle, 0, sizeof(*dev->intr_handle));
 
-	dev->intr_handle->efd_counter_size = sizeof(uint64_t);
-
 	dev->intr_handle->intr_vec =
 		malloc(nb_rxq * sizeof(dev->intr_handle->intr_vec[0]));
 
 	if (!dev->intr_handle->intr_vec) {
 		VHOST_LOG(ERR,
 			"Failed to allocate memory for interrupt vector\n");
-		free(dev->intr_handle);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto error;
 	}
 
-	VHOST_LOG(INFO, "Prepare intr vec\n");
+	VHOST_LOG(DEBUG, "Prepare intr vec\n");
 	for (i = 0; i < nb_rxq; i++) {
-		dev->intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + i;
-		dev->intr_handle->efds[i] = -1;
-		vq = dev->data->rx_queues[i];
-		if (!vq) {
-			VHOST_LOG(INFO, "rxq-%d not setup yet, skip!\n", i);
-			continue;
-		}
+		int epoll_fd = epoll_create1(0);
 
-		ret = rte_vhost_get_vhost_vring(vq->vid, (i << 1) + 1, &vring);
-		if (ret < 0) {
-			VHOST_LOG(INFO,
-				"Failed to get rxq-%d's vring, skip!\n", i);
-			continue;
+		if (epoll_fd < 0) {
+			VHOST_LOG(ERR, "Failed to create proxy epoll fd for rxq-%d\n", i);
+			ret = -errno;
+			goto error;
 		}
 
-		if (vring.kickfd < 0) {
-			VHOST_LOG(INFO,
-				"rxq-%d's kickfd is invalid, skip!\n", i);
-			continue;
-		}
-		dev->intr_handle->efds[i] = vring.kickfd;
-		VHOST_LOG(INFO, "Installed intr vec for rxq-%d\n", i);
+		dev->intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + i;
+		dev->intr_handle->efds[i] = epoll_fd;
+		vq = dev->data->rx_queues[i];
+		memset(&vq->ev, 0, sizeof(vq->ev));
+		vq->ev.events = EPOLLIN;
+		vq->ev.data.fd = epoll_fd;
 	}
 
 	dev->intr_handle->nb_efd = nb_rxq;
@@ -712,6 +663,50 @@
 	dev->intr_handle->type = RTE_INTR_HANDLE_VDEV;
 
 	return 0;
+
+error:
+	eth_vhost_uninstall_intr(dev);
+	return ret;
+}
+
+static void
+eth_vhost_configure_intr(struct rte_eth_dev *dev)
+{
+	int i;
+
+	VHOST_LOG(DEBUG, "Configure intr vec\n");
+	for (i = 0; i < dev->data->nb_rx_queues; i++)
+		eth_vhost_update_intr(dev, i);
+}
+
+static void
+eth_vhost_unconfigure_intr(struct rte_eth_dev *eth_dev)
+{
+	struct vhost_queue *vq;
+	int i;
+
+	VHOST_LOG(DEBUG, "Unconfigure intr vec\n");
+	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+		vq = eth_dev->data->rx_queues[i];
+		if (vq == NULL || vq->vid < 0)
+			continue;
+
+		rte_spinlock_lock(&vq->intr_lock);
+
+		/* Remove previous kickfd from proxy epoll */
+		if (vq->kickfd >= 0) {
+			if (epoll_ctl(vq->ev.data.fd, EPOLL_CTL_DEL, vq->kickfd, &vq->ev) < 0) {
+				VHOST_LOG(DEBUG, "Failed to unregister %d from rxq-%d epoll: %s\n",
+					vq->kickfd, i, strerror(errno));
+			} else {
+				VHOST_LOG(DEBUG, "Unregistered %d from rxq-%d epoll\n",
+					vq->kickfd, i);
+			}
+			vq->kickfd = -1;
+		}
+
+		rte_spinlock_unlock(&vq->intr_lock);
+	}
 }
 
 static void
@@ -815,16 +810,8 @@
 	internal->vid = vid;
 	if (rte_atomic32_read(&internal->started) == 1) {
 		queue_setup(eth_dev, internal);
-
-		if (dev_conf->intr_conf.rxq) {
-			if (eth_vhost_install_intr(eth_dev) < 0) {
-				VHOST_LOG(INFO,
-					"Failed to install interrupt handler.");
-					return -1;
-			}
-		}
-	} else {
-		VHOST_LOG(INFO, "RX/TX queues not exist yet\n");
+		if (dev_conf->intr_conf.rxq)
+			eth_vhost_configure_intr(eth_dev);
 	}
 
 	for (i = 0; i < rte_vhost_get_vring_num(vid); i++)
@@ -866,6 +853,7 @@
 
 	rte_atomic32_set(&internal->dev_attached, 0);
 	update_queuing_status(eth_dev, true);
+	eth_vhost_unconfigure_intr(eth_dev);
 
 	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
 
@@ -894,53 +882,11 @@
 	rte_spinlock_unlock(&state->lock);
 
 	VHOST_LOG(INFO, "Vhost device %d destroyed\n", vid);
-	eth_vhost_uninstall_intr(eth_dev);
 
 	rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
 }
 
 static int
-vring_conf_update(int vid, struct rte_eth_dev *eth_dev, uint16_t vring_id)
-{
-	struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;
-	struct pmd_internal *internal = eth_dev->data->dev_private;
-	struct vhost_queue *vq;
-	struct rte_vhost_vring vring;
-	int rx_idx = vring_id % 2 ? (vring_id - 1) >> 1 : -1;
-	int ret = 0;
-
-	/*
-	 * The vring kickfd may be changed after the new device notification.
-	 * Update it when the vring state is updated.
-	 */
-	if (rx_idx >= 0 && rx_idx < eth_dev->data->nb_rx_queues &&
-	    rte_atomic32_read(&internal->dev_attached) &&
-	    rte_atomic32_read(&internal->started) &&
-	    dev_conf->intr_conf.rxq) {
-		ret = rte_vhost_get_vhost_vring(vid, vring_id, &vring);
-		if (ret) {
-			VHOST_LOG(ERR, "Failed to get vring %d information.\n",
-					vring_id);
-			return ret;
-		}
-		eth_dev->intr_handle->efds[rx_idx] = vring.kickfd;
-
-		vq = eth_dev->data->rx_queues[rx_idx];
-		if (!vq) {
-			VHOST_LOG(ERR, "rxq%d is not setup yet\n", rx_idx);
-			return -1;
-		}
-
-		rte_spinlock_lock(&vq->intr_lock);
-		if (vq->intr_enable)
-			ret = eth_vhost_update_intr(eth_dev, rx_idx);
-		rte_spinlock_unlock(&vq->intr_lock);
-	}
-
-	return ret;
-}
-
-static int
 vring_state_changed(int vid, uint16_t vring, int enable)
 {
 	struct rte_vhost_vring_state *state;
@@ -959,9 +905,8 @@
 	/* won't be NULL */
 	state = vring_states[eth_dev->data->port_id];
 
-	if (enable && vring_conf_update(vid, eth_dev, vring))
-		VHOST_LOG(INFO, "Failed to update vring-%d configuration.\n",
-			  (int)vring);
+	if (eth_dev->data->dev_conf.intr_conf.rxq && vring % 2)
+		eth_vhost_update_intr(eth_dev, (vring - 1) >> 1);
 
 	rte_spinlock_lock(&state->lock);
 	if (state->cur[vring] == enable) {
@@ -1146,18 +1091,17 @@
 	struct pmd_internal *internal = eth_dev->data->dev_private;
 	struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;
 
-	queue_setup(eth_dev, internal);
-
-	if (rte_atomic32_read(&internal->dev_attached) == 1) {
-		if (dev_conf->intr_conf.rxq) {
-			if (eth_vhost_install_intr(eth_dev) < 0) {
-				VHOST_LOG(INFO,
-					"Failed to install interrupt handler.");
-					return -1;
-			}
-		}
+	eth_vhost_uninstall_intr(eth_dev);
+	if (dev_conf->intr_conf.rxq && eth_vhost_install_intr(eth_dev) < 0) {
+		VHOST_LOG(ERR, "Failed to install interrupt handler.\n");
+		return -1;
 	}
 
+	queue_setup(eth_dev, internal);
+	if (rte_atomic32_read(&internal->dev_attached) == 1 &&
+			dev_conf->intr_conf.rxq)
+		eth_vhost_configure_intr(eth_dev);
+
 	rte_atomic32_set(&internal->started, 1);
 	update_queuing_status(eth_dev, false);
 
@@ -1212,6 +1156,8 @@
 	rte_free(internal->iface_name);
 	rte_free(internal);
 
+	eth_vhost_uninstall_intr(dev);
+
 	dev->data->dev_private = NULL;
 
 	rte_free(vring_states[dev->data->port_id]);
@@ -1239,6 +1185,7 @@
 	vq->mb_pool = mb_pool;
 	vq->virtqueue_id = rx_queue_id * VIRTIO_QNUM + VIRTIO_TXQ;
 	rte_spinlock_init(&vq->intr_lock);
+	vq->kickfd = -1;
 	dev->data->rx_queues[rx_queue_id] = vq;
 
 	return 0;
@@ -1261,6 +1208,7 @@
 
 	vq->virtqueue_id = tx_queue_id * VIRTIO_QNUM + VIRTIO_RXQ;
 	rte_spinlock_init(&vq->intr_lock);
+	vq->kickfd = -1;
 	dev->data->tx_queues[tx_queue_id] = vq;
 
 	return 0;
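The vhost PMD rework above replaces direct exposure of the vring kick fd with a per-queue proxy epoll fd. A minimal sketch of the pattern in plain POSIX epoll (no DPDK types; names are illustrative): the application keeps waiting on one stable fd, while the kick fd that actually fires can be swapped underneath it.

#include <sys/epoll.h>

/* Stable fd handed to the application for polling. */
static int proxy_fd = -1;

static int proxy_init(void)
{
	proxy_fd = epoll_create1(0);
	return proxy_fd < 0 ? -1 : 0;
}

/* Swap the monitored kick fd without the application noticing. */
static void proxy_set_kickfd(int old_fd, int new_fd)
{
	struct epoll_event ev = { .events = EPOLLIN, .data.fd = new_fd };

	if (old_fd >= 0)
		epoll_ctl(proxy_fd, EPOLL_CTL_DEL, old_fd, NULL);
	if (new_fd >= 0)
		epoll_ctl(proxy_fd, EPOLL_CTL_ADD, new_fd, &ev);
}

In the driver the proxy fd lives in vq->ev.data.fd, one per RX queue, and updates are serialized with the queue's intr_lock.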
diff -Nru dpdk-20.11.7/drivers/net/virtio/virtio_ethdev.c dpdk-20.11.8/drivers/net/virtio/virtio_ethdev.c
--- dpdk-20.11.7/drivers/net/virtio/virtio_ethdev.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/net/virtio/virtio_ethdev.c	2023-04-27 18:57:22.000000000 +0100
@@ -2097,6 +2097,9 @@
 static int vdpa_check_handler(__rte_unused const char *key,
 		const char *value, void *ret_val)
 {
+	if (value == NULL || ret_val == NULL)
+		return -EINVAL;
+
 	if (strcmp(value, "1") == 0)
 		*(int *)ret_val = 1;
 	else
@@ -2134,6 +2137,9 @@
 static int vectorized_check_handler(__rte_unused const char *key,
 		const char *value, void *ret_val)
 {
+	if (value == NULL || ret_val == NULL)
+		return -EINVAL;
+
 	if (strcmp(value, "1") == 0)
 		*(int *)ret_val = 1;
 	else
diff -Nru dpdk-20.11.7/drivers/net/virtio/virtio_pci.c dpdk-20.11.8/drivers/net/virtio/virtio_pci.c
--- dpdk-20.11.7/drivers/net/virtio/virtio_pci.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/net/virtio/virtio_pci.c	2023-04-27 18:57:22.000000000 +0100
@@ -31,22 +31,6 @@
 #define VIRTIO_PCI_CONFIG(hw) \
 		(((hw)->use_msix == VIRTIO_MSIX_ENABLED) ? 24 : 20)
 
-static inline int
-check_vq_phys_addr_ok(struct virtqueue *vq)
-{
-	/* Virtio PCI device VIRTIO_PCI_QUEUE_PF register is 32bit,
-	 * and only accepts 32 bit page frame number.
-	 * Check if the allocated physical memory exceeds 16TB.
-	 */
-	if ((vq->vq_ring_mem + vq->vq_ring_size - 1) >>
-			(VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
-		PMD_INIT_LOG(ERR, "vring address shouldn't be above 16TB!");
-		return 0;
-	}
-
-	return 1;
-}
-
 /*
  * Since we are in legacy mode:
  * http://ozlabs.org/~rusty/virtio-spec/virtio-0.9.5.pdf
@@ -213,8 +197,15 @@
 {
 	uint32_t src;
 
-	if (!check_vq_phys_addr_ok(vq))
+	/* Virtio PCI device VIRTIO_PCI_QUEUE_PFN register is 32bit,
+	 * and only accepts 32 bit page frame number.
+	 * Check if the allocated physical memory exceeds 16TB.
+	 */
+	if ((vq->vq_ring_mem + vq->vq_ring_size - 1) >>
+			(VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
+		PMD_INIT_LOG(ERR, "vring address shouldn't be above 16TB!");
 		return -1;
+	}
 
 	rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
 		VIRTIO_PCI_QUEUE_SEL);
@@ -366,9 +357,6 @@
 	uint64_t desc_addr, avail_addr, used_addr;
 	uint16_t notify_off;
 
-	if (!check_vq_phys_addr_ok(vq))
-		return -1;
-
 	desc_addr = vq->vq_ring_mem;
 	avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
 	used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,
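On the 16TB bound enforced above, a short derivation (standalone sketch; the value 12 matches the conventional 4 KiB page shift that VIRTIO_PCI_QUEUE_ADDR_SHIFT represents): the legacy VIRTIO_PCI_QUEUE_PFN register stores a 32-bit page frame number, so the highest addressable byte is 2^(32 + 12) - 1, i.e. 16 TiB - 1.

#include <stdint.h>

#define QUEUE_ADDR_SHIFT 12	/* 4 KiB pages */

/* Any bit at or above position 44 puts the ring beyond 16 TiB. */
static int vring_addr_ok(uint64_t ring_mem, uint64_t ring_size)
{
	return ((ring_mem + ring_size - 1) >> (QUEUE_ADDR_SHIFT + 32)) == 0;
}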
diff -Nru dpdk-20.11.7/drivers/net/virtio/virtio_rxtx.c dpdk-20.11.8/drivers/net/virtio/virtio_rxtx.c
--- dpdk-20.11.7/drivers/net/virtio/virtio_rxtx.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/net/virtio/virtio_rxtx.c	2023-04-27 18:57:22.000000000 +0100
@@ -421,29 +421,36 @@
 	if (likely(rte_pktmbuf_data_len(m) >= m->l2_len + m->l3_len +
 			m->l4_len)) {
 		struct rte_ipv4_hdr *iph;
-		struct rte_ipv6_hdr *ip6h;
 		struct rte_tcp_hdr *th;
-		uint16_t prev_cksum, new_cksum, ip_len, ip_paylen;
+		uint16_t prev_cksum, new_cksum;
+		uint32_t ip_paylen;
 		uint32_t tmp;
 
 		iph = rte_pktmbuf_mtod_offset(m,
 					struct rte_ipv4_hdr *, m->l2_len);
 		th = RTE_PTR_ADD(iph, m->l3_len);
+
+		/*
+		 * Calculate the IPv4 header checksum with the current total
+		 * length value (whatever it is), so that the checksum stays
+		 * correct after the TSO engine edits the header.
+		 */
 		if ((iph->version_ihl >> 4) == 4) {
 			iph->hdr_checksum = 0;
 			iph->hdr_checksum = rte_ipv4_cksum(iph);
-			ip_len = iph->total_length;
-			ip_paylen = rte_cpu_to_be_16(rte_be_to_cpu_16(ip_len) -
-				m->l3_len);
-		} else {
-			ip6h = (struct rte_ipv6_hdr *)iph;
-			ip_paylen = ip6h->payload_len;
 		}
 
+		/*
+		 * Do not use the IPv4 total length or IPv6 payload length fields
+		 * to derive the TSO payload length: it may not fit into 16 bits.
+		 */
+		ip_paylen = rte_cpu_to_be_32(rte_pktmbuf_pkt_len(m) - m->l2_len -
+					m->l3_len);
+
 		/* calculate the new phdr checksum not including ip_paylen */
 		prev_cksum = th->cksum;
 		tmp = prev_cksum;
-		tmp += ip_paylen;
+		tmp += (ip_paylen & 0xffff) + (ip_paylen >> 16);
 		tmp = (tmp & 0xffff) + (tmp >> 16);
 		new_cksum = tmp;
 
diff -Nru dpdk-20.11.7/drivers/net/virtio/virtio_user_ethdev.c dpdk-20.11.8/drivers/net/virtio/virtio_user_ethdev.c
--- dpdk-20.11.7/drivers/net/virtio/virtio_user_ethdev.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/net/virtio/virtio_user_ethdev.c	2023-04-27 18:57:22.000000000 +0100
@@ -317,10 +317,15 @@
 	if (status & VIRTIO_CONFIG_STATUS_FEATURES_OK &&
 			~old_status & VIRTIO_CONFIG_STATUS_FEATURES_OK)
 		virtio_user_dev_set_features(dev);
-	if (status & VIRTIO_CONFIG_STATUS_DRIVER_OK)
-		virtio_user_start_device(dev);
-	else if (status == VIRTIO_CONFIG_STATUS_RESET)
+
+	if (status & VIRTIO_CONFIG_STATUS_DRIVER_OK) {
+		if (virtio_user_start_device(dev)) {
+			virtio_user_dev_update_status(dev);
+			return;
+		}
+	} else if (status == VIRTIO_CONFIG_STATUS_RESET) {
 		virtio_user_reset(hw);
+	}
 
 	virtio_user_dev_set_status(dev, status);
 }
diff -Nru dpdk-20.11.7/drivers/raw/ifpga/base/opae_hw_api.c dpdk-20.11.8/drivers/raw/ifpga/base/opae_hw_api.c
--- dpdk-20.11.7/drivers/raw/ifpga/base/opae_hw_api.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/raw/ifpga/base/opae_hw_api.c	2023-04-27 18:57:22.000000000 +0100
@@ -380,7 +380,7 @@
 				PROT_READ | PROT_WRITE, MAP_SHARED,
 				shm_id, 0);
 		adapter->lock = (pthread_mutex_t *)ptr;
-		if (ptr) {
+		if (ptr != MAP_FAILED) {
 			dev_info(NULL,
 					"shared memory %s address is %p\n",
 					shm_name, ptr);
@@ -497,7 +497,7 @@
 		adapter->shm.size = size;
 		adapter->shm.ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 							MAP_SHARED, shm_id, 0);
-		if (adapter->shm.ptr) {
+		if (adapter->shm.ptr != MAP_FAILED) {
 			dev_info(NULL,
 					"shared memory %s address is %p\n",
 					shm_name, adapter->shm.ptr);
diff -Nru dpdk-20.11.7/drivers/raw/skeleton/skeleton_rawdev.c dpdk-20.11.8/drivers/raw/skeleton/skeleton_rawdev.c
--- dpdk-20.11.7/drivers/raw/skeleton/skeleton_rawdev.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/raw/skeleton/skeleton_rawdev.c	2023-04-27 18:57:22.000000000 +0100
@@ -421,7 +421,7 @@
 	 * help in complex implementation which require more information than
 	 * just an integer - for example, a queue-pair.
 	 */
-	q_id = *((int *)context);
+	q_id = *((uint16_t *)context);
 
 	for (i = 0; i < count; i++)
 		queue_buf[q_id].bufs[i] = buffers[i]->buf_addr;
@@ -443,7 +443,7 @@
 	 * help in complex implementation which require more information than
 	 * just an integer - for example, a queue-pair.
 	 */
-	q_id = *((int *)context);
+	q_id = *((uint16_t *)context);
 
 	for (i = 0; i < count; i++)
 		buffers[i]->buf_addr = queue_buf[q_id].bufs[i];
@@ -659,6 +659,8 @@
 		     void *opaque)
 {
 	int *flag = opaque;
+	if (value == NULL || opaque == NULL)
+		return -EINVAL;
 	*flag = atoi(value);
 	return 0;
 }
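The int-to-uint16_t change above is a pointer-width fix: the enqueue/dequeue callers pass a pointer to a uint16_t queue id, and dereferencing it through an int * reads 4 bytes of a 2-byte object. A minimal illustration (standalone; names are invented):

#include <stdint.h>

static uint16_t read_q_id(const void *context)
{
	/*
	 * The caller passed &queue_id with queue_id being uint16_t.
	 * Reading it through an int * would be undefined behaviour and
	 * yields garbage on big-endian targets; the pointer type must
	 * match the stored object.
	 */
	return *(const uint16_t *)context;
}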
diff -Nru dpdk-20.11.7/drivers/raw/skeleton/skeleton_rawdev_test.c dpdk-20.11.8/drivers/raw/skeleton/skeleton_rawdev_test.c
--- dpdk-20.11.7/drivers/raw/skeleton/skeleton_rawdev_test.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/raw/skeleton/skeleton_rawdev_test.c	2023-04-27 18:57:22.000000000 +0100
@@ -368,42 +368,34 @@
 test_rawdev_enqdeq(void)
 {
 	int ret;
-	unsigned int count = 1;
 	uint16_t queue_id = 0;
-	struct rte_rawdev_buf buffers[1];
-	struct rte_rawdev_buf *deq_buffers = NULL;
-
-	buffers[0].buf_addr = malloc(strlen(TEST_DEV_NAME) + 3);
-	if (!buffers[0].buf_addr)
-		goto cleanup;
-	snprintf(buffers[0].buf_addr, strlen(TEST_DEV_NAME) + 2, "%s%d",
+	struct rte_rawdev_buf buffer;
+	struct rte_rawdev_buf *buffers[1];
+	struct rte_rawdev_buf deq_buffer;
+	struct rte_rawdev_buf *deq_buffers[1];
+
+	buffers[0] = &buffer;
+	buffer.buf_addr = malloc(strlen(TEST_DEV_NAME) + 3);
+	if (!buffer.buf_addr)
+		return TEST_FAILED;
+	snprintf(buffer.buf_addr, strlen(TEST_DEV_NAME) + 2, "%s%d",
 		 TEST_DEV_NAME, 0);
 
-	ret = rte_rawdev_enqueue_buffers(test_dev_id,
-					 (struct rte_rawdev_buf **)&buffers,
-					 count, &queue_id);
-	RTE_TEST_ASSERT_EQUAL((unsigned int)ret, count,
+	ret = rte_rawdev_enqueue_buffers(test_dev_id, buffers,
+					 RTE_DIM(buffers), &queue_id);
+	RTE_TEST_ASSERT_EQUAL((unsigned int)ret, RTE_DIM(buffers),
 			      "Unable to enqueue buffers");
 
-	deq_buffers = malloc(sizeof(struct rte_rawdev_buf) * count);
-	if (!deq_buffers)
-		goto cleanup;
-
-	ret = rte_rawdev_dequeue_buffers(test_dev_id,
-					(struct rte_rawdev_buf **)&deq_buffers,
-					count, &queue_id);
-	RTE_TEST_ASSERT_EQUAL((unsigned int)ret, count,
+	deq_buffers[0] = &deq_buffer;
+	ret = rte_rawdev_dequeue_buffers(test_dev_id, deq_buffers,
+					RTE_DIM(deq_buffers), &queue_id);
+	RTE_TEST_ASSERT_EQUAL((unsigned int)ret, RTE_DIM(buffers),
 			      "Unable to dequeue buffers");
+	RTE_TEST_ASSERT_EQUAL(deq_buffers[0]->buf_addr, buffers[0]->buf_addr,
+			      "Did not retrieve expected object");
 
-	if (deq_buffers)
-		free(deq_buffers);
-
+	free(buffer.buf_addr);
 	return TEST_SUCCESS;
-cleanup:
-	if (buffers[0].buf_addr)
-		free(buffers[0].buf_addr);
-
-	return TEST_FAILED;
 }
 
 static void skeldev_test_run(int (*setup)(void),
diff -Nru dpdk-20.11.7/drivers/vdpa/ifc/ifcvf_vdpa.c dpdk-20.11.8/drivers/vdpa/ifc/ifcvf_vdpa.c
--- dpdk-20.11.7/drivers/vdpa/ifc/ifcvf_vdpa.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/drivers/vdpa/ifc/ifcvf_vdpa.c	2023-04-27 18:57:22.000000000 +0100
@@ -842,6 +842,8 @@
 	vdpa_ifcvf_stop(internal);
 	vdpa_disable_vfio_intr(internal);
 
+	rte_atomic32_set(&internal->running, 0);
+
 	ret = rte_vhost_host_notifier_ctrl(vid, RTE_VHOST_QUEUE_ALL, false);
 	if (ret && ret != -ENOTSUP)
 		goto error;
@@ -1253,6 +1255,11 @@
 			goto error;
 	}
 	internal->sw_lm = sw_fallback_lm;
+	if (!internal->sw_lm && !internal->hw.lm_cfg) {
+		DRV_LOG(ERR, "Device %s does not support HW-assisted live migration, please enable sw-live-migration!",
+			pci_dev->name);
+		goto error;
+	}
 
 	internal->vdev = rte_vdpa_register_device(&pci_dev->device, &ifcvf_ops);
 	if (internal->vdev == NULL) {
diff -Nru dpdk-20.11.7/examples/cmdline/parse_obj_list.h dpdk-20.11.8/examples/cmdline/parse_obj_list.h
--- dpdk-20.11.7/examples/cmdline/parse_obj_list.h	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/examples/cmdline/parse_obj_list.h	2023-04-27 18:57:22.000000000 +0100
@@ -12,8 +12,9 @@
 
 #include <sys/queue.h>
 #include <cmdline_parse.h>
+#include <cmdline_parse_string.h>
 
-#define OBJ_NAME_LEN_MAX 64
+#define OBJ_NAME_LEN_MAX sizeof(cmdline_fixed_string_t)
 
 struct object {
 	SLIST_ENTRY(object) next;
diff -Nru dpdk-20.11.7/examples/qos_sched/init.c dpdk-20.11.8/examples/qos_sched/init.c
--- dpdk-20.11.7/examples/qos_sched/init.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/examples/qos_sched/init.c	2023-04-27 18:57:22.000000000 +0100
@@ -82,6 +82,7 @@
 	if (app_inited_port_mask & (1u << portid))
 		return 0;
 
+	memset(&rx_conf, 0, sizeof(struct rte_eth_rxconf));
 	rx_conf.rx_thresh.pthresh = rx_thresh.pthresh;
 	rx_conf.rx_thresh.hthresh = rx_thresh.hthresh;
 	rx_conf.rx_thresh.wthresh = rx_thresh.wthresh;
@@ -89,6 +90,7 @@
 	rx_conf.rx_drop_en = 0;
 	rx_conf.rx_deferred_start = 0;
 
+	memset(&tx_conf, 0, sizeof(struct rte_eth_txconf));
 	tx_conf.tx_thresh.pthresh = tx_thresh.pthresh;
 	tx_conf.tx_thresh.hthresh = tx_thresh.hthresh;
 	tx_conf.tx_thresh.wthresh = tx_thresh.wthresh;
@@ -385,6 +387,8 @@
 	for(i = 0; i < nb_pfc; i++) {
 		uint32_t socket = rte_lcore_to_socket_id(qos_conf[i].rx_core);
 		struct rte_ring *ring;
+		struct rte_eth_link link = {0};
+		int retry_count = 100, retry_delay = 100; /* try every 100ms for 10 sec */
 
 		snprintf(ring_name, MAX_NAME_LEN, "ring-%u-%u", i, qos_conf[i].rx_core);
 		ring = rte_ring_lookup(ring_name);
@@ -415,6 +419,14 @@
 		app_init_port(qos_conf[i].rx_port, qos_conf[i].mbuf_pool);
 		app_init_port(qos_conf[i].tx_port, qos_conf[i].mbuf_pool);
 
+		rte_eth_link_get(qos_conf[i].tx_port, &link);
+		if (link.link_status == 0)
+			printf("Waiting for link on port %u\n", qos_conf[i].tx_port);
+		while (link.link_status == 0 && retry_count--) {
+			rte_delay_ms(retry_delay);
+			rte_eth_link_get(qos_conf[i].tx_port, &link);
+		}
+
 		qos_conf[i].sched_port = app_init_sched_port(qos_conf[i].tx_port, socket);
 	}
 
diff -Nru dpdk-20.11.7/examples/qos_sched/profile.cfg dpdk-20.11.8/examples/qos_sched/profile.cfg
--- dpdk-20.11.7/examples/qos_sched/profile.cfg	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/examples/qos_sched/profile.cfg	2023-04-27 18:57:22.000000000 +0100
@@ -26,6 +26,8 @@
 number of pipes per subport = 4096
 queue sizes = 64 64 64 64 64 64 64 64 64 64 64 64 64
 
+pipe 0-4095 = 0                ; These pipes are configured with pipe profile 0
+
 [subport profile 0]
 tb rate = 1250000000           ; Bytes per second
 tb size = 1000000              ; Bytes
@@ -46,8 +48,6 @@
 
 tc period = 10                 ; Milliseconds
 
-pipe 0-4095 = 0                ; These pipes are configured with pipe profile 0
-
 ; Pipe configuration
 [pipe profile 0]
 tb rate = 305175               ; Bytes per second
diff -Nru dpdk-20.11.7/examples/qos_sched/profile_ov.cfg dpdk-20.11.8/examples/qos_sched/profile_ov.cfg
--- dpdk-20.11.7/examples/qos_sched/profile_ov.cfg	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/examples/qos_sched/profile_ov.cfg	2023-04-27 18:57:22.000000000 +0100
@@ -6,12 +6,14 @@
 frame overhead = 24
 number of subports per port = 1
 
+subport 0-8 = 0
+
 ; Subport configuration
 [subport 0]
 number of pipes per subport = 32
 queue sizes = 64 64 64 64 64 64 64 64 64 64 64 64 64
 
-subport 0-8 = 0
+pipe 0-31 = 0               ; These pipes are configured with pipe profile 0
 
 [subport profile 0]
 tb rate = 8400000           ; Bytes per second
@@ -32,8 +34,6 @@
 tc 12 rate = 8400000         ; Bytes per second
 tc period = 10              ; Milliseconds
 
-pipe 0-31 = 0               ; These pipes are configured with pipe profile 0
-
 ; Pipe configuration
 [pipe profile 0]
 tb rate = 16800000             ; Bytes per second
diff -Nru dpdk-20.11.7/examples/vm_power_manager/channel_manager.c dpdk-20.11.8/examples/vm_power_manager/channel_manager.c
--- dpdk-20.11.7/examples/vm_power_manager/channel_manager.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/examples/vm_power_manager/channel_manager.c	2023-04-27 18:57:22.000000000 +0100
@@ -23,7 +23,6 @@
 #include <rte_log.h>
 #include <rte_atomic.h>
 #include <rte_spinlock.h>
-#include <rte_tailq.h>
 
 #include <libvirt/libvirt.h>
 
@@ -60,16 +59,16 @@
 	virDomainInfo info;
 	rte_spinlock_t config_spinlock;
 	int allow_query;
-	RTE_TAILQ_ENTRY(virtual_machine_info) vms_info;
+	LIST_ENTRY(virtual_machine_info) vms_info;
 };
 
-RTE_TAILQ_HEAD(, virtual_machine_info) vm_list_head;
+LIST_HEAD(, virtual_machine_info) vm_list_head;
 
 static struct virtual_machine_info *
 find_domain_by_name(const char *name)
 {
 	struct virtual_machine_info *info;
-	RTE_TAILQ_FOREACH(info, &vm_list_head, vms_info) {
+	LIST_FOREACH(info, &vm_list_head, vms_info) {
 		if (!strncmp(info->name, name, CHANNEL_MGR_MAX_NAME_LEN-1))
 			return info;
 	}
@@ -878,7 +877,7 @@
 
 	new_domain->allow_query = 0;
 	rte_spinlock_init(&(new_domain->config_spinlock));
-	TAILQ_INSERT_HEAD(&vm_list_head, new_domain, vms_info);
+	LIST_INSERT_HEAD(&vm_list_head, new_domain, vms_info);
 	return 0;
 }
 
@@ -900,7 +899,7 @@
 		rte_spinlock_unlock(&vm_info->config_spinlock);
 		return -1;
 	}
-	TAILQ_REMOVE(&vm_list_head, vm_info, vms_info);
+	LIST_REMOVE(vm_info, vms_info);
 	rte_spinlock_unlock(&vm_info->config_spinlock);
 	rte_free(vm_info);
 	return 0;
@@ -953,7 +952,7 @@
 {
 	virNodeInfo info;
 
-	TAILQ_INIT(&vm_list_head);
+	LIST_INIT(&vm_list_head);
 	if (connect_hypervisor(path) < 0) {
 		global_n_host_cpus = 64;
 		global_hypervisor_available = 0;
@@ -1005,9 +1004,9 @@
 {
 	unsigned i;
 	char mask[RTE_MAX_LCORE];
-	struct virtual_machine_info *vm_info, *tmp;
+	struct virtual_machine_info *vm_info;
 
-	RTE_TAILQ_FOREACH_SAFE(vm_info, &vm_list_head, vms_info, tmp) {
+	LIST_FOREACH(vm_info, &vm_list_head, vms_info) {
 
 		rte_spinlock_lock(&(vm_info->config_spinlock));
 
@@ -1022,7 +1021,7 @@
 		}
 		rte_spinlock_unlock(&(vm_info->config_spinlock));
 
-		TAILQ_REMOVE(&vm_list_head, vm_info, vms_info);
+		LIST_REMOVE(vm_info, vms_info);
 		rte_free(vm_info);
 	}
 
diff -Nru dpdk-20.11.7/.github/workflows/build.yml dpdk-20.11.8/.github/workflows/build.yml
--- dpdk-20.11.7/.github/workflows/build.yml	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/.github/workflows/build.yml	2023-04-27 18:57:22.000000000 +0100
@@ -28,29 +28,29 @@
       fail-fast: false
       matrix:
         config:
-          - os: ubuntu-18.04
+          - os: ubuntu-20.04
             compiler: gcc
             library: static
-          - os: ubuntu-18.04
+          - os: ubuntu-20.04
             compiler: gcc
             library: shared
             checks: abi+doc+tests
-          - os: ubuntu-18.04
+          - os: ubuntu-20.04
             compiler: clang
             library: static
-          - os: ubuntu-18.04
+          - os: ubuntu-20.04
             compiler: clang
             library: shared
             checks: doc+tests
-          - os: ubuntu-18.04
+          - os: ubuntu-20.04
             compiler: gcc
             library: static
             cross: i386
-          - os: ubuntu-18.04
+          - os: ubuntu-20.04
             compiler: gcc
             library: static
             cross: aarch64
-          - os: ubuntu-18.04
+          - os: ubuntu-20.04
             compiler: gcc
             library: shared
             cross: aarch64
diff -Nru dpdk-20.11.7/kernel/linux/kni/compat.h dpdk-20.11.8/kernel/linux/kni/compat.h
--- dpdk-20.11.7/kernel/linux/kni/compat.h	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/kernel/linux/kni/compat.h	2023-04-27 18:57:22.000000000 +0100
@@ -146,6 +146,8 @@
 #define HAVE_ETH_HW_ADDR_SET
 #endif
 
-#if KERNEL_VERSION(5, 18, 0) > LINUX_VERSION_CODE
+#if KERNEL_VERSION(5, 18, 0) > LINUX_VERSION_CODE && \
+	(!(defined(RHEL_RELEASE_CODE) && \
+	 RHEL_RELEASE_VERSION(9, 1) <= RHEL_RELEASE_CODE))
 #define HAVE_NETIF_RX_NI
 #endif
diff -Nru dpdk-20.11.7/lib/librte_acl/acl_run_altivec.h dpdk-20.11.8/lib/librte_acl/acl_run_altivec.h
--- dpdk-20.11.7/lib/librte_acl/acl_run_altivec.h	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/lib/librte_acl/acl_run_altivec.h	2023-04-27 18:57:22.000000000 +0100
@@ -102,7 +102,7 @@
 /*
  * Process 4 transitions (in 2 XMM registers) in parallel
  */
-static inline __attribute__((optimize("O2"))) xmm_t
+static __rte_always_inline xmm_t
 transition4(xmm_t next_input, const uint64_t *trans,
 	xmm_t *indices1, xmm_t *indices2)
 {
diff -Nru dpdk-20.11.7/lib/librte_cmdline/cmdline.c dpdk-20.11.8/lib/librte_cmdline/cmdline.c
--- dpdk-20.11.7/lib/librte_cmdline/cmdline.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/lib/librte_cmdline/cmdline.c	2023-04-27 18:57:22.000000000 +0100
@@ -181,6 +181,7 @@
 {
 	if (!cl)
 		return;
+	cmdline_cancel(cl);
 	rdline_quit(&cl->rdl);
 }
 
@@ -205,9 +206,14 @@
 		if (read_status < 0)
 			return read_status;
 
-		status = cmdline_in(cl, &c, 1);
-		if (status < 0 && cl->rdl.status != RDLINE_EXITED)
-			return status;
+		if (read_status == 0) {
+			/* end of file is implicit quit */
+			cmdline_quit(cl);
+		} else {
+			status = cmdline_in(cl, &c, 1);
+			if (status < 0 && cl->rdl.status != RDLINE_EXITED)
+				return status;
+		}
 	}
 
 	return cl->rdl.status;
diff -Nru dpdk-20.11.7/lib/librte_cmdline/cmdline_os_unix.c dpdk-20.11.8/lib/librte_cmdline/cmdline_os_unix.c
--- dpdk-20.11.7/lib/librte_cmdline/cmdline_os_unix.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/lib/librte_cmdline/cmdline_os_unix.c	2023-04-27 18:57:22.000000000 +0100
@@ -51,3 +51,9 @@
 {
 	return vdprintf(fd, format, op);
 }
+
+/* This function is not needed on Linux; use sigaction() instead. */
+void
+cmdline_cancel(__rte_unused struct cmdline *cl)
+{
+}
diff -Nru dpdk-20.11.7/lib/librte_cmdline/cmdline_os_windows.c dpdk-20.11.8/lib/librte_cmdline/cmdline_os_windows.c
--- dpdk-20.11.7/lib/librte_cmdline/cmdline_os_windows.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/lib/librte_cmdline/cmdline_os_windows.c	2023-04-27 18:57:22.000000000 +0100
@@ -205,3 +205,17 @@
 
 	return ret;
 }
+
+void
+cmdline_cancel(struct cmdline *cl)
+{
+	if (!cl)
+		return;
+
+	/* force the outstanding read on console to exit */
+	if (cl->oldterm.is_console_input) {
+		HANDLE handle = (HANDLE)_get_osfhandle(cl->s_in);
+
+		CancelIoEx(handle, NULL);
+	}
+}
diff -Nru dpdk-20.11.7/lib/librte_cmdline/cmdline_private.h dpdk-20.11.8/lib/librte_cmdline/cmdline_private.h
--- dpdk-20.11.7/lib/librte_cmdline/cmdline_private.h	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/lib/librte_cmdline/cmdline_private.h	2023-04-27 18:57:22.000000000 +0100
@@ -46,6 +46,9 @@
 /* Read one character from input. */
 ssize_t cmdline_read_char(struct cmdline *cl, char *c);
 
+/* Force current cmdline read to unblock. */
+void cmdline_cancel(struct cmdline *cl);
+
 /* vdprintf(3) */
 __rte_format_printf(2, 0)
 int cmdline_vdprintf(int fd, const char *format, va_list op);
diff -Nru dpdk-20.11.7/lib/librte_compressdev/rte_compressdev.h dpdk-20.11.8/lib/librte_compressdev/rte_compressdev.h
--- dpdk-20.11.7/lib/librte_compressdev/rte_compressdev.h	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/lib/librte_compressdev/rte_compressdev.h	2023-04-27 18:57:22.000000000 +0100
@@ -353,7 +353,7 @@
  * @note The capabilities field of dev_info is set to point to the first
  * element of an array of struct rte_compressdev_capabilities.
  * The element after the last valid element has it's op field set to
- * RTE_COMP_ALGO_LIST_END.
+ * RTE_COMP_ALGO_UNSPECIFIED.
  */
 __rte_experimental
 void
diff -Nru dpdk-20.11.7/lib/librte_compressdev/rte_compressdev_pmd.c dpdk-20.11.8/lib/librte_compressdev/rte_compressdev_pmd.c
--- dpdk-20.11.7/lib/librte_compressdev/rte_compressdev_pmd.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/lib/librte_compressdev/rte_compressdev_pmd.c	2023-04-27 18:57:22.000000000 +0100
@@ -20,6 +20,9 @@
 	struct rte_compressdev_pmd_init_params *params = extra_args;
 	int n;
 
+	if (value == NULL || extra_args == NULL)
+		return -EINVAL;
+
 	n = strlcpy(params->name, value, RTE_COMPRESSDEV_NAME_MAX_LEN);
 	if (n >= RTE_COMPRESSDEV_NAME_MAX_LEN)
 		return -EINVAL;
@@ -37,6 +40,9 @@
 	int i;
 	char *end;
 
+	if (value == NULL || extra_args == NULL)
+		return -EINVAL;
+
 	errno = 0;
 	i = strtol(value, &end, 10);
 	if (*end != 0 || errno != 0 || i < 0)
diff -Nru dpdk-20.11.7/lib/librte_cryptodev/rte_cryptodev.h dpdk-20.11.8/lib/librte_cryptodev/rte_cryptodev.h
--- dpdk-20.11.7/lib/librte_cryptodev/rte_cryptodev.h	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/lib/librte_cryptodev/rte_cryptodev.h	2023-04-27 18:57:22.000000000 +0100
@@ -474,6 +474,7 @@
 rte_cryptodev_get_feature_name(uint64_t flag);
 
 /**  Crypto device information */
+/* Structure rte_cryptodev_info 8< */
 struct rte_cryptodev_info {
 	const char *driver_name;	/**< Driver name. */
 	uint8_t driver_id;		/**< Driver identifier */
@@ -502,6 +503,7 @@
 		 */
 	} sym;
 };
+/* >8 End of structure rte_cryptodev_info. */
 
 #define RTE_CRYPTODEV_DETACHED  (0)
 #define RTE_CRYPTODEV_ATTACHED  (1)
@@ -514,6 +516,7 @@
 };
 
 /** Crypto device queue pair configuration structure. */
+/* Structure rte_cryptodev_qp_conf 8<*/
 struct rte_cryptodev_qp_conf {
 	uint32_t nb_descriptors; /**< Number of descriptors per queue pair */
 	struct rte_mempool *mp_session;
@@ -521,6 +524,7 @@
 	struct rte_mempool *mp_session_private;
 	/**< The mempool for creating sess private data in sessionless mode */
 };
+/* >8 End of structure rte_cryptodev_qp_conf. */
 
 /**
  * Typedef for application callback function to be registered by application
@@ -625,6 +629,7 @@
 rte_cryptodev_socket_id(uint8_t dev_id);
 
 /** Crypto device configuration structure */
+/* Structure rte_cryptodev_config 8< */
 struct rte_cryptodev_config {
 	int socket_id;			/**< Socket to allocate resources on */
 	uint16_t nb_queue_pairs;
@@ -637,6 +642,7 @@
 	 *  - RTE_CRYTPODEV_FF_SECURITY
 	 */
 };
+/* >8 End of structure rte_cryptodev_config. */
 
 /**
  * Configure a device.
diff -Nru dpdk-20.11.7/lib/librte_cryptodev/rte_cryptodev_pmd.c dpdk-20.11.8/lib/librte_cryptodev/rte_cryptodev_pmd.c
--- dpdk-20.11.7/lib/librte_cryptodev/rte_cryptodev_pmd.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/lib/librte_cryptodev/rte_cryptodev_pmd.c	2023-04-27 18:57:22.000000000 +0100
@@ -17,6 +17,9 @@
 	struct rte_cryptodev_pmd_init_params *params = extra_args;
 	int n;
 
+	if (value == NULL || extra_args == NULL)
+		return -EINVAL;
+
 	n = strlcpy(params->name, value, RTE_CRYPTODEV_NAME_MAX_LEN);
 	if (n >= RTE_CRYPTODEV_NAME_MAX_LEN)
 		return -EINVAL;
@@ -33,6 +36,10 @@
 {
 	int i;
 	char *end;
+
+	if (value == NULL || extra_args == NULL)
+		return -EINVAL;
+
 	errno = 0;
 
 	i = strtol(value, &end, 10);
diff -Nru dpdk-20.11.7/lib/librte_cryptodev/rte_cryptodev_pmd.h dpdk-20.11.8/lib/librte_cryptodev/rte_cryptodev_pmd.h
--- dpdk-20.11.7/lib/librte_cryptodev/rte_cryptodev_pmd.h	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/lib/librte_cryptodev/rte_cryptodev_pmd.h	2023-04-27 18:57:22.000000000 +0100
@@ -491,9 +491,6 @@
  *  *
  * @param	dev	Pointer to cryptodev struct
  * @param	event	Crypto device interrupt event type.
- *
- * @return
- *  void
  */
 void rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
 				enum rte_cryptodev_event_type event);
diff -Nru dpdk-20.11.7/lib/librte_cryptodev/rte_crypto_sym.h dpdk-20.11.8/lib/librte_cryptodev/rte_crypto_sym.h
--- dpdk-20.11.7/lib/librte_cryptodev/rte_crypto_sym.h	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/lib/librte_cryptodev/rte_crypto_sym.h	2023-04-27 18:57:22.000000000 +0100
@@ -540,6 +540,7 @@
  * hold a single transform, the type field is used to specify which transform
  * is contained within the union
  */
+/* Structure rte_crypto_sym_xform 8< */
 struct rte_crypto_sym_xform {
 	struct rte_crypto_sym_xform *next;
 	/**< next xform in chain */
@@ -555,6 +556,7 @@
 		/**< AEAD xform */
 	};
 };
+/* >8 End of structure rte_crypto_sym_xform. */
 
 struct rte_cryptodev_sym_session;
 
@@ -588,6 +590,7 @@
  * destination buffer being at a different alignment, relative to buffer start,
  * to the data in the source buffer.
  */
+/* Structure rte_crypto_sym_op 8< */
 struct rte_crypto_sym_op {
 	struct rte_mbuf *m_src;	/**< source mbuf */
 	struct rte_mbuf *m_dst;	/**< destination mbuf */
@@ -850,6 +853,7 @@
 		};
 	};
 };
+/* >8 End of structure rte_crypto_sym_op. */
 
 
 /**
diff -Nru dpdk-20.11.7/lib/librte_eal/common/eal_common_fbarray.c dpdk-20.11.8/lib/librte_eal/common/eal_common_fbarray.c
--- dpdk-20.11.7/lib/librte_eal/common/eal_common_fbarray.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/lib/librte_eal/common/eal_common_fbarray.c	2023-04-27 18:57:22.000000000 +0100
@@ -1485,7 +1485,7 @@
 
 	if (fully_validate(arr->name, arr->elt_sz, arr->len)) {
 		fprintf(f, "Invalid file-backed array\n");
-		goto out;
+		return;
 	}
 
 	/* prevent array from changing under us */
@@ -1499,6 +1499,5 @@
 
 	for (i = 0; i < msk->n_masks; i++)
 		fprintf(f, "msk idx %i: 0x%016" PRIx64 "\n", i, msk->data[i]);
-out:
 	rte_rwlock_read_unlock(&arr->rwlock);
 }
diff -Nru dpdk-20.11.7/lib/librte_eal/freebsd/eal_alarm.c dpdk-20.11.8/lib/librte_eal/freebsd/eal_alarm.c
--- dpdk-20.11.7/lib/librte_eal/freebsd/eal_alarm.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/lib/librte_eal/freebsd/eal_alarm.c	2023-04-27 18:57:22.000000000 +0100
@@ -148,12 +148,12 @@
 	struct timespec now;
 	struct alarm_entry *ap;
 
-	rte_spinlock_lock(&alarm_list_lk);
-	ap = LIST_FIRST(&alarm_list);
-
 	if (clock_gettime(CLOCK_TYPE_ID, &now) < 0)
 		return;
 
+	rte_spinlock_lock(&alarm_list_lk);
+	ap = LIST_FIRST(&alarm_list);
+
 	while (ap != NULL && timespec_cmp(&now, &ap->time) >= 0) {
 		ap->executing = 1;
 		ap->executing_id = pthread_self();
diff -Nru dpdk-20.11.7/lib/librte_eal/freebsd/eal_hugepage_info.c dpdk-20.11.8/lib/librte_eal/freebsd/eal_hugepage_info.c
--- dpdk-20.11.7/lib/librte_eal/freebsd/eal_hugepage_info.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/lib/librte_eal/freebsd/eal_hugepage_info.c	2023-04-27 18:57:22.000000000 +0100
@@ -33,7 +33,7 @@
 	}
 	retval = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 	close(fd);
-	return retval;
+	return retval == MAP_FAILED ? NULL : retval;
 }
 
 static void *
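This hunk, its Linux twin below, and the ifpga change earlier all fix the same pitfall: mmap(2) reports failure with MAP_FAILED ((void *)-1), not NULL. A minimal sketch of the normalization (illustrative helper):

#include <stddef.h>
#include <sys/mman.h>

static void *map_shared(int fd, size_t len)
{
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	/* Normalize so callers can keep their existing NULL checks. */
	return p == MAP_FAILED ? NULL : p;
}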
diff -Nru dpdk-20.11.7/lib/librte_eal/include/generic/rte_atomic.h dpdk-20.11.8/lib/librte_eal/include/generic/rte_atomic.h
--- dpdk-20.11.7/lib/librte_eal/include/generic/rte_atomic.h	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/lib/librte_eal/include/generic/rte_atomic.h	2023-04-27 18:57:22.000000000 +0100
@@ -175,11 +175,7 @@
 static inline uint16_t
 rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val)
 {
-#if defined(__clang__)
 	return __atomic_exchange_n(dst, val, __ATOMIC_SEQ_CST);
-#else
-	return __atomic_exchange_2(dst, val, __ATOMIC_SEQ_CST);
-#endif
 }
 #endif
 
@@ -458,11 +454,7 @@
 static inline uint32_t
 rte_atomic32_exchange(volatile uint32_t *dst, uint32_t val)
 {
-#if defined(__clang__)
 	return __atomic_exchange_n(dst, val, __ATOMIC_SEQ_CST);
-#else
-	return __atomic_exchange_4(dst, val, __ATOMIC_SEQ_CST);
-#endif
 }
 #endif
 
@@ -740,11 +732,7 @@
 static inline uint64_t
 rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val)
 {
-#if defined(__clang__)
 	return __atomic_exchange_n(dst, val, __ATOMIC_SEQ_CST);
-#else
-	return __atomic_exchange_8(dst, val, __ATOMIC_SEQ_CST);
-#endif
 }
 #endif
 
diff -Nru dpdk-20.11.7/lib/librte_eal/include/rte_bitmap.h dpdk-20.11.8/lib/librte_eal/include/rte_bitmap.h
--- dpdk-20.11.7/lib/librte_eal/include/rte_bitmap.h	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/lib/librte_eal/include/rte_bitmap.h	2023-04-27 18:57:22.000000000 +0100
@@ -327,8 +327,6 @@
  *   Handle to bitmap instance
  * @param pos
  *   Bit position
- * @return
- *   0 upon success, error code otherwise
  */
 static inline void
 rte_bitmap_prefetch0(struct rte_bitmap *bmp, uint32_t pos)
diff -Nru dpdk-20.11.7/lib/librte_eal/include/rte_hexdump.h dpdk-20.11.8/lib/librte_eal/include/rte_hexdump.h
--- dpdk-20.11.7/lib/librte_eal/include/rte_hexdump.h	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/lib/librte_eal/include/rte_hexdump.h	2023-04-27 18:57:22.000000000 +0100
@@ -27,8 +27,6 @@
 *		This is the buffer address to print out.
 * @param len
 *		The number of bytes to dump out
-* @return
-*		None.
 */
 
 extern void
@@ -45,8 +43,6 @@
 *		This is the buffer address to print out.
 * @param len
 *		The number of bytes to dump out
-* @return
-*		None.
 */
 
 void
diff -Nru dpdk-20.11.7/lib/librte_eal/include/rte_service_component.h dpdk-20.11.8/lib/librte_eal/include/rte_service_component.h
--- dpdk-20.11.7/lib/librte_eal/include/rte_service_component.h	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/lib/librte_eal/include/rte_service_component.h	2023-04-27 18:57:22.000000000 +0100
@@ -130,8 +130,6 @@
 /**
  * @internal Free up the memory that has been initialized.
  * This routine is to be invoked prior to process termination.
- *
- * @retval None
  */
 void rte_service_finalize(void);
 
diff -Nru dpdk-20.11.7/lib/librte_eal/linux/eal_hugepage_info.c dpdk-20.11.8/lib/librte_eal/linux/eal_hugepage_info.c
--- dpdk-20.11.7/lib/librte_eal/linux/eal_hugepage_info.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/lib/librte_eal/linux/eal_hugepage_info.c	2023-04-27 18:57:22.000000000 +0100
@@ -57,7 +57,7 @@
 	retval = mmap(NULL, mem_size, PROT_READ | PROT_WRITE,
 			MAP_SHARED, fd, 0);
 	close(fd);
-	return retval;
+	return retval == MAP_FAILED ? NULL : retval;
 }
 
 static void *
diff -Nru dpdk-20.11.7/lib/librte_eal/windows/eal.c dpdk-20.11.8/lib/librte_eal/windows/eal.c
--- dpdk-20.11.7/lib/librte_eal/windows/eal.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/lib/librte_eal/windows/eal.c	2023-04-27 18:57:22.000000000 +0100
@@ -437,6 +437,9 @@
 	 */
 	rte_eal_mp_remote_launch(sync_func, NULL, SKIP_MAIN);
 	rte_eal_mp_wait_lcore();
+
+	eal_mcfg_complete();
+
 	return fctret;
 }
 
diff -Nru dpdk-20.11.7/lib/librte_eal/windows/include/pthread.h dpdk-20.11.8/lib/librte_eal/windows/include/pthread.h
--- dpdk-20.11.7/lib/librte_eal/windows/include/pthread.h	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/lib/librte_eal/windows/include/pthread.h	2023-04-27 18:57:22.000000000 +0100
@@ -134,7 +134,8 @@
 {
 	RTE_SET_USED(threadattr);
 	HANDLE hThread;
-	hThread = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE)threadfunc,
+	hThread = CreateThread(NULL, 0,
+		(LPTHREAD_START_ROUTINE)(uintptr_t)threadfunc,
 		args, 0, (LPDWORD)threadid);
 	if (hThread) {
 		SetPriorityClass(GetCurrentProcess(), NORMAL_PRIORITY_CLASS);
diff -Nru dpdk-20.11.7/lib/librte_eventdev/rte_event_eth_tx_adapter.c dpdk-20.11.8/lib/librte_eventdev/rte_event_eth_tx_adapter.c
--- dpdk-20.11.7/lib/librte_eventdev/rte_event_eth_tx_adapter.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/lib/librte_eventdev/rte_event_eth_tx_adapter.c	2023-04-27 18:57:22.000000000 +0100
@@ -582,7 +582,7 @@
 		RTE_ETH_FOREACH_DEV(i) {
 			uint16_t q;
 
-			if (i == txa->dev_count)
+			if (i >= txa->dev_count)
 				break;
 
 			dev = tdi[i].dev;
diff -Nru dpdk-20.11.7/lib/librte_eventdev/rte_event_timer_adapter.c dpdk-20.11.8/lib/librte_eventdev/rte_event_timer_adapter.c
--- dpdk-20.11.7/lib/librte_eventdev/rte_event_timer_adapter.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/lib/librte_eventdev/rte_event_timer_adapter.c	2023-04-27 18:57:22.000000000 +0100
@@ -19,6 +19,7 @@
 #include <rte_timer.h>
 #include <rte_service_component.h>
 #include <rte_cycles.h>
+#include <rte_reciprocal.h>
 
 #include "rte_eventdev.h"
 #include "rte_eventdev_pmd.h"
@@ -645,13 +646,51 @@
 	}
 }
 
-static __rte_always_inline uint64_t
+static __rte_always_inline int
 get_timeout_cycles(struct rte_event_timer *evtim,
-		   const struct rte_event_timer_adapter *adapter)
+		   const struct rte_event_timer_adapter *adapter,
+		   uint64_t *timeout_cycles)
 {
-	struct swtim *sw = swtim_pmd_priv(adapter);
-	uint64_t timeout_ns = evtim->timeout_ticks * sw->timer_tick_ns;
-	return timeout_ns * rte_get_timer_hz() / NSECPERSEC;
+	static struct rte_reciprocal_u64 nsecpersec_inverse;
+	static uint64_t timer_hz;
+	uint64_t rem_cycles, secs_cycles = 0;
+	uint64_t secs, timeout_nsecs;
+	uint64_t nsecpersec;
+	struct swtim *sw;
+
+	sw = swtim_pmd_priv(adapter);
+	nsecpersec = (uint64_t)NSECPERSEC;
+
+	timeout_nsecs = evtim->timeout_ticks * sw->timer_tick_ns;
+	if (timeout_nsecs > sw->max_tmo_ns)
+		return -1;
+	if (timeout_nsecs < sw->timer_tick_ns)
+		return -2;
+
+	/* Set these values in the first invocation */
+	if (!timer_hz) {
+		timer_hz = rte_get_timer_hz();
+		nsecpersec_inverse = rte_reciprocal_value_u64(nsecpersec);
+	}
+
+	/* If timeout_nsecs > nsecpersec, decrease timeout_nsecs by the number
+	 * of whole seconds it contains and convert that value to a number
+	 * of cycles. This keeps timeout_nsecs in the interval [0..nsecpersec)
+	 * in order to avoid overflow when we later multiply by timer_hz.
+	 */
+	if (timeout_nsecs > nsecpersec) {
+		secs = rte_reciprocal_divide_u64(timeout_nsecs,
+						 &nsecpersec_inverse);
+		secs_cycles = secs * timer_hz;
+		timeout_nsecs -= secs * nsecpersec;
+	}
+
+	rem_cycles = rte_reciprocal_divide_u64(timeout_nsecs * timer_hz,
+					       &nsecpersec_inverse);
+
+	*timeout_cycles = secs_cycles + rem_cycles;
+
+	return 0;
 }
 
 /* This function returns true if one or more (adapter) ticks have occurred since
@@ -685,23 +724,6 @@
 	return false;
 }
 
-/* Check that event timer timeout value is in range */
-static __rte_always_inline int
-check_timeout(struct rte_event_timer *evtim,
-	      const struct rte_event_timer_adapter *adapter)
-{
-	uint64_t tmo_nsec;
-	struct swtim *sw = swtim_pmd_priv(adapter);
-
-	tmo_nsec = evtim->timeout_ticks * sw->timer_tick_ns;
-	if (tmo_nsec > sw->max_tmo_ns)
-		return -1;
-	if (tmo_nsec < sw->timer_tick_ns)
-		return -2;
-
-	return 0;
-}
-
 /* Check that event timer event queue sched type matches destination event queue
  * sched type
  */
@@ -1072,21 +1094,6 @@
 			break;
 		}
 
-		ret = check_timeout(evtims[i], adapter);
-		if (unlikely(ret == -1)) {
-			__atomic_store_n(&evtims[i]->state,
-					RTE_EVENT_TIMER_ERROR_TOOLATE,
-					__ATOMIC_RELAXED);
-			rte_errno = EINVAL;
-			break;
-		} else if (unlikely(ret == -2)) {
-			__atomic_store_n(&evtims[i]->state,
-					RTE_EVENT_TIMER_ERROR_TOOEARLY,
-					__ATOMIC_RELAXED);
-			rte_errno = EINVAL;
-			break;
-		}
-
 		if (unlikely(check_destination_event_queue(evtims[i],
 							   adapter) < 0)) {
 			__atomic_store_n(&evtims[i]->state,
@@ -1102,7 +1109,21 @@
 		evtims[i]->impl_opaque[0] = (uintptr_t)tim;
 		evtims[i]->impl_opaque[1] = (uintptr_t)adapter;
 
-		cycles = get_timeout_cycles(evtims[i], adapter);
+		ret = get_timeout_cycles(evtims[i], adapter, &cycles);
+		if (unlikely(ret == -1)) {
+			__atomic_store_n(&evtims[i]->state,
+					RTE_EVENT_TIMER_ERROR_TOOLATE,
+					__ATOMIC_RELAXED);
+			rte_errno = EINVAL;
+			break;
+		} else if (unlikely(ret == -2)) {
+			__atomic_store_n(&evtims[i]->state,
+					RTE_EVENT_TIMER_ERROR_TOOEARLY,
+					__ATOMIC_RELAXED);
+			rte_errno = EINVAL;
+			break;
+		}
+
 		ret = rte_timer_alt_reset(sw->timer_data_id, tim, cycles,
 					  SINGLE, lcore_id, NULL, evtims[i]);
 		if (ret < 0) {
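The seconds/remainder split introduced above guards against 64-bit overflow: with, say, timer_hz = 2.5 GHz, a 10 second timeout gives 10^10 ns x 2.5x10^9 Hz = 2.5x10^19, beyond UINT64_MAX (about 1.84x10^19). A simplified sketch of the same idea, using plain division where the patch uses rte_reciprocal for speed:

#include <stdint.h>

#define NS_PER_SEC 1000000000ULL

static uint64_t ns_to_cycles(uint64_t timeout_ns, uint64_t timer_hz)
{
	uint64_t secs = timeout_ns / NS_PER_SEC;		/* whole seconds */
	uint64_t rem_ns = timeout_ns - secs * NS_PER_SEC;	/* < 10^9 */

	/*
	 * rem_ns * timer_hz stays below 10^9 * timer_hz, so it cannot
	 * overflow for any realistic clock rate (up to ~18 GHz).
	 */
	return secs * timer_hz + rem_ns * timer_hz / NS_PER_SEC;
}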
diff -Nru dpdk-20.11.7/lib/librte_fib/rte_fib6.h dpdk-20.11.8/lib/librte_fib/rte_fib6.h
--- dpdk-20.11.7/lib/librte_fib/rte_fib6.h	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/lib/librte_fib/rte_fib6.h	2023-04-27 18:57:22.000000000 +0100
@@ -119,8 +119,6 @@
  *
  * @param fib
  *   FIB object handle
- * @return
- *   None
  */
 __rte_experimental
 void
diff -Nru dpdk-20.11.7/lib/librte_fib/rte_fib.h dpdk-20.11.8/lib/librte_fib/rte_fib.h
--- dpdk-20.11.7/lib/librte_fib/rte_fib.h	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/lib/librte_fib/rte_fib.h	2023-04-27 18:57:22.000000000 +0100
@@ -128,8 +128,6 @@
  *
  * @param fib
  *   FIB object handle
- * @return
- *   None
  */
 __rte_experimental
 void
diff -Nru dpdk-20.11.7/lib/librte_graph/node.c dpdk-20.11.8/lib/librte_graph/node.c
--- dpdk-20.11.7/lib/librte_graph/node.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/lib/librte_graph/node.c	2023-04-27 18:57:22.000000000 +0100
@@ -310,16 +310,16 @@
 		if (node->id == id) {
 			if (node->nb_edges < size) {
 				rte_errno = E2BIG;
-				goto fail;
+			} else {
+				node->nb_edges = size;
+				rc = size;
 			}
-			node->nb_edges = size;
-			rc = size;
 			break;
 		}
 	}
 
-fail:
 	graph_spinlock_unlock();
+fail:
 	return rc;
 }
 
diff -Nru dpdk-20.11.7/lib/librte_ipsec/rte_ipsec_sad.h dpdk-20.11.8/lib/librte_ipsec/rte_ipsec_sad.h
--- dpdk-20.11.7/lib/librte_ipsec/rte_ipsec_sad.h	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/lib/librte_ipsec/rte_ipsec_sad.h	2023-04-27 18:57:22.000000000 +0100
@@ -139,8 +139,6 @@
  *
  * @param sad
  *   pointer to the SAD object
- * @return
- *   None
  */
 void
 rte_ipsec_sad_destroy(struct rte_ipsec_sad *sad);
diff -Nru dpdk-20.11.7/lib/librte_kni/rte_kni.c dpdk-20.11.8/lib/librte_kni/rte_kni.c
--- dpdk-20.11.7/lib/librte_kni/rte_kni.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/lib/librte_kni/rte_kni.c	2023-04-27 18:57:22.000000000 +0100
@@ -635,8 +635,8 @@
 {
 	unsigned int ret = kni_fifo_get(kni->tx_q, (void **)mbufs, num);
 
-	/* If buffers removed, allocate mbufs and then put them into alloc_q */
-	if (ret)
+	/* If buffers removed or alloc_q is empty, allocate mbufs and then put them into alloc_q */
+	if (ret || (kni_fifo_count(kni->alloc_q) == 0))
 		kni_allocate_mbufs(kni);
 
 	return ret;
diff -Nru dpdk-20.11.7/lib/librte_kvargs/rte_kvargs.h dpdk-20.11.8/lib/librte_kvargs/rte_kvargs.h
--- dpdk-20.11.7/lib/librte_kvargs/rte_kvargs.h	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/lib/librte_kvargs/rte_kvargs.h	2023-04-27 18:57:22.000000000 +0100
@@ -36,7 +36,19 @@
 /** separator character used between key and value */
 #define RTE_KVARGS_KV_DELIM	"="
 
-/** Type of callback function used by rte_kvargs_process() */
+/**
+ * Callback prototype used by rte_kvargs_process().
+ *
+ * @param key
+ *   The key to consider, it will not be NULL.
+ * @param value
+ *   The value corresponding to the key; it may be NULL (e.g. a bare key with no value).
+ * @param opaque
+ *   An opaque pointer coming from the caller.
+ * @return
+ *   - >=0 on success (the key was handled).
+ *   - <0 on error.
+ */
 typedef int (*arg_handler_t)(const char *key, const char *value, void *opaque);
 
 /** A key/value association */
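To make the documented contract concrete, a hypothetical handler (names invented for the example) that parses an integer option and rejects a bare key; it would be passed to rte_kvargs_process() together with the key it serves:

#include <errno.h>
#include <stdlib.h>

static int handle_count(const char *key, const char *value, void *opaque)
{
	int *count = opaque;

	(void)key;
	if (value == NULL)
		return -EINVAL;	/* this particular option requires a value */
	*count = atoi(value);
	return 0;		/* >= 0: key handled successfully */
}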
diff -Nru dpdk-20.11.7/lib/librte_lpm/rte_lpm6.h dpdk-20.11.8/lib/librte_lpm/rte_lpm6.h
--- dpdk-20.11.7/lib/librte_lpm/rte_lpm6.h	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/lib/librte_lpm/rte_lpm6.h	2023-04-27 18:57:22.000000000 +0100
@@ -73,8 +73,6 @@
  *
  * @param lpm
  *   LPM object handle
- * @return
- *   None
  */
 void
 rte_lpm6_free(struct rte_lpm6 *lpm);
diff -Nru dpdk-20.11.7/lib/librte_lpm/rte_lpm.h dpdk-20.11.8/lib/librte_lpm/rte_lpm.h
--- dpdk-20.11.7/lib/librte_lpm/rte_lpm.h	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/lib/librte_lpm/rte_lpm.h	2023-04-27 18:57:22.000000000 +0100
@@ -183,8 +183,6 @@
  *
  * @param lpm
  *   LPM object handle
- * @return
- *   None
  */
 void
 rte_lpm_free(struct rte_lpm *lpm);
diff -Nru dpdk-20.11.7/lib/librte_mempool/rte_mempool.h dpdk-20.11.8/lib/librte_mempool/rte_mempool.h
--- dpdk-20.11.7/lib/librte_mempool/rte_mempool.h	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/lib/librte_mempool/rte_mempool.h	2023-04-27 18:57:22.000000000 +0100
@@ -1403,61 +1403,82 @@
  *   A pointer to a mempool cache structure. May be NULL if not needed.
  * @return
  *   - >=0: Success; number of objects supplied.
- *   - <0: Error; code of ring dequeue function.
+ *   - <0: Error; code of driver dequeue function.
  */
 static __rte_always_inline int
 __mempool_generic_get(struct rte_mempool *mp, void **obj_table,
 		      unsigned int n, struct rte_mempool_cache *cache)
 {
 	int ret;
+	unsigned int remaining = n;
 	uint32_t index, len;
 	void **cache_objs;
 
-	/* No cache provided or cannot be satisfied from cache */
-	if (unlikely(cache == NULL || n >= cache->size))
-		goto ring_dequeue;
-
-	cache_objs = cache->objs;
-
-	/* Can this be satisfied from the cache? */
-	if (cache->len < n) {
-		/* No. Backfill the cache first, and then fill from it */
-		uint32_t req = n + (cache->size - cache->len);
-
-		/* How many do we require i.e. number to fill the cache + the request */
-		ret = rte_mempool_ops_dequeue_bulk(mp,
-			&cache->objs[cache->len], req);
-		if (unlikely(ret < 0)) {
-			/*
-			 * In the off chance that we are buffer constrained,
-			 * where we are not able to allocate cache + n, go to
-			 * the ring directly. If that fails, we are truly out of
-			 * buffers.
-			 */
-			goto ring_dequeue;
-		}
+	/* No cache provided */
+	if (unlikely(cache == NULL))
+		goto driver_dequeue;
+
+	/* Serve as much of the request as possible from the cache, to return hot objects first */
+	len = RTE_MIN(remaining, cache->len);
+	cache_objs = &cache->objs[cache->len];
+	cache->len -= len;
+	remaining -= len;
+	for (index = 0; index < len; index++)
+		*obj_table++ = *--cache_objs;
+
+	if (remaining == 0) {
+		/* The entire request is satisfied from the cache. */
+
+		__MEMPOOL_STAT_ADD(mp, get_success, n);
 
-		cache->len += req;
+		return 0;
 	}
 
-	/* Now fill in the response ... */
-	for (index = 0, len = cache->len - 1; index < n; ++index, len--, obj_table++)
-		*obj_table = cache_objs[len];
+	/* If the dequeue below would overflow the memory allocated for the cache */
+	if (unlikely(remaining > RTE_MEMPOOL_CACHE_MAX_SIZE))
+		goto driver_dequeue;
+
+	/* Fill the cache from the backend; fetch size + remaining objects. */
+	ret = rte_mempool_ops_dequeue_bulk(mp, cache->objs,
+			cache->size + remaining);
+	if (unlikely(ret < 0)) {
+		/*
+		 * We are buffer constrained, and not able to allocate
+		 * cache + remaining.
+		 * Do not fill the cache, just satisfy the remaining part of
+		 * the request directly from the backend.
+		 */
+		goto driver_dequeue;
+	}
+
+	/* Satisfy the remaining part of the request from the filled cache. */
+	cache_objs = &cache->objs[cache->size + remaining];
+	for (index = 0; index < remaining; index++)
+		*obj_table++ = *--cache_objs;
 
-	cache->len -= n;
+	cache->len = cache->size;
 
 	__MEMPOOL_STAT_ADD(mp, get_success, n);
 
 	return 0;
 
-ring_dequeue:
+driver_dequeue:
+
+	/* Get remaining objects directly from the backend. */
+	ret = rte_mempool_ops_dequeue_bulk(mp, obj_table, remaining);
 
-	/* get remaining objects from ring */
-	ret = rte_mempool_ops_dequeue_bulk(mp, obj_table, n);
+	if (ret < 0) {
+		if (likely(cache != NULL)) {
+			cache->len = n - remaining;
+			/*
+			 * No further action is required to roll the first part
+			 * of the request back into the cache, as objects in
+			 * the cache are intact.
+			 */
+		}
 
-	if (ret < 0)
 		__MEMPOOL_STAT_ADD(mp, get_fail, n);
-	else
+	} else
 		__MEMPOOL_STAT_ADD(mp, get_success, n);
 
 	return ret;
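
The reworked helper serves hot objects from the per-lcore cache first, then refills the cache from the backend in a single bulk dequeue. A minimal usage sketch of the public wrapper (the get_burst function is hypothetical; the rte_mempool calls are the real API):

#include <rte_lcore.h>
#include <rte_mempool.h>

/* Sketch: bulk-get 32 objects, preferring the per-lcore cache. */
static int
get_burst(struct rte_mempool *mp, void **objs)
{
	struct rte_mempool_cache *cache =
		rte_mempool_default_cache(mp, rte_lcore_id());

	/* Served from the cache when possible, the backend otherwise. */
	return rte_mempool_generic_get(mp, objs, 32, cache);
}
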
diff -Nru dpdk-20.11.7/lib/librte_pdump/rte_pdump.c dpdk-20.11.8/lib/librte_pdump/rte_pdump.c
--- dpdk-20.11.7/lib/librte_pdump/rte_pdump.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/lib/librte_pdump/rte_pdump.c	2023-04-27 18:57:22.000000000 +0100
@@ -91,7 +91,7 @@
 			dup_bufs[d_pkts++] = p;
 	}
 
-	ring_enq = rte_ring_enqueue_burst(ring, (void *)dup_bufs, d_pkts, NULL);
+	ring_enq = rte_ring_enqueue_burst(ring, (void *)&dup_bufs[0], d_pkts, NULL);
 	if (unlikely(ring_enq < d_pkts)) {
 		PDUMP_LOG(DEBUG,
 			"only %d of packets enqueued to ring\n", ring_enq);
diff -Nru dpdk-20.11.7/lib/librte_rcu/rte_rcu_qsbr.h dpdk-20.11.8/lib/librte_rcu/rte_rcu_qsbr.h
--- dpdk-20.11.7/lib/librte_rcu/rte_rcu_qsbr.h	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/lib/librte_rcu/rte_rcu_qsbr.h	2023-04-27 18:57:22.000000000 +0100
@@ -130,9 +130,6 @@
  *   Pointer to the resource data stored on the defer queue
  * @param n
  *   Number of resources to free. Currently, this is set to 1.
- *
- * @return
- *   None
  */
 typedef void (*rte_rcu_qsbr_free_resource_t)(void *p, void *e, unsigned int n);
 
diff -Nru dpdk-20.11.7/lib/librte_reorder/rte_reorder.c dpdk-20.11.8/lib/librte_reorder/rte_reorder.c
--- dpdk-20.11.7/lib/librte_reorder/rte_reorder.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/lib/librte_reorder/rte_reorder.c	2023-04-27 18:57:22.000000000 +0100
@@ -61,6 +61,11 @@
 {
 	const unsigned int min_bufsize = sizeof(*b) +
 					(2 * size * sizeof(struct rte_mbuf *));
+	static const struct rte_mbuf_dynfield reorder_seqn_dynfield_desc = {
+		.name = RTE_REORDER_SEQN_DYNFIELD_NAME,
+		.size = sizeof(rte_reorder_seqn_t),
+		.align = __alignof__(rte_reorder_seqn_t),
+	};
 
 	if (b == NULL) {
 		RTE_LOG(ERR, REORDER, "Invalid reorder buffer parameter:"
@@ -87,6 +92,15 @@
 		return NULL;
 	}
 
+	rte_reorder_seqn_dynfield_offset = rte_mbuf_dynfield_register(&reorder_seqn_dynfield_desc);
+	if (rte_reorder_seqn_dynfield_offset < 0) {
+		RTE_LOG(ERR, REORDER,
+			"Failed to register mbuf field for reorder sequence number, rte_errno: %i\n",
+			rte_errno);
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+
 	memset(b, 0, bufsize);
 	strlcpy(b->name, name, sizeof(b->name));
 	b->memsize = bufsize;
@@ -99,21 +113,45 @@
 	return b;
 }
 
+/*
+ * Insert a new entry into the global list.
+ * Returns a pointer to the already-inserted entry if one exists, or to the newly inserted one.
+ */
+static struct rte_tailq_entry *
+rte_reorder_entry_insert(struct rte_tailq_entry *new_te)
+{
+	struct rte_reorder_list *reorder_list;
+	struct rte_reorder_buffer *b, *nb;
+	struct rte_tailq_entry *te;
+
+	rte_mcfg_tailq_write_lock();
+
+	reorder_list = RTE_TAILQ_CAST(rte_reorder_tailq.head, rte_reorder_list);
+	/* check whether an entry with the same name already exists */
+	TAILQ_FOREACH(te, reorder_list, next) {
+		b = (struct rte_reorder_buffer *) te->data;
+		nb = (struct rte_reorder_buffer *) new_te->data;
+		if (strncmp(nb->name, b->name, RTE_REORDER_NAMESIZE) == 0)
+			break;
+	}
+
+	if (te == NULL) {
+		TAILQ_INSERT_TAIL(reorder_list, new_te, next);
+		te = new_te;
+	}
+
+	rte_mcfg_tailq_write_unlock();
+
+	return te;
+}
+
 struct rte_reorder_buffer*
 rte_reorder_create(const char *name, unsigned socket_id, unsigned int size)
 {
 	struct rte_reorder_buffer *b = NULL;
-	struct rte_tailq_entry *te;
-	struct rte_reorder_list *reorder_list;
+	struct rte_tailq_entry *te, *te_inserted;
 	const unsigned int bufsize = sizeof(struct rte_reorder_buffer) +
 					(2 * size * sizeof(struct rte_mbuf *));
-	static const struct rte_mbuf_dynfield reorder_seqn_dynfield_desc = {
-		.name = RTE_REORDER_SEQN_DYNFIELD_NAME,
-		.size = sizeof(rte_reorder_seqn_t),
-		.align = __alignof__(rte_reorder_seqn_t),
-	};
-
-	reorder_list = RTE_TAILQ_CAST(rte_reorder_tailq.head, rte_reorder_list);
 
 	/* Check user arguments. */
 	if (!rte_is_power_of_2(size)) {
@@ -129,32 +167,12 @@
 		return NULL;
 	}
 
-	rte_reorder_seqn_dynfield_offset =
-		rte_mbuf_dynfield_register(&reorder_seqn_dynfield_desc);
-	if (rte_reorder_seqn_dynfield_offset < 0) {
-		RTE_LOG(ERR, REORDER, "Failed to register mbuf field for reorder sequence number\n");
-		rte_errno = ENOMEM;
-		return NULL;
-	}
-
-	rte_mcfg_tailq_write_lock();
-
-	/* guarantee there's no existing */
-	TAILQ_FOREACH(te, reorder_list, next) {
-		b = (struct rte_reorder_buffer *) te->data;
-		if (strncmp(name, b->name, RTE_REORDER_NAMESIZE) == 0)
-			break;
-	}
-	if (te != NULL)
-		goto exit;
-
 	/* allocate tailq entry */
 	te = rte_zmalloc("REORDER_TAILQ_ENTRY", sizeof(*te), 0);
 	if (te == NULL) {
 		RTE_LOG(ERR, REORDER, "Failed to allocate tailq entry\n");
 		rte_errno = ENOMEM;
-		b = NULL;
-		goto exit;
+		return NULL;
 	}
 
 	/* Allocate memory to store the reorder buffer structure. */
@@ -163,14 +181,23 @@
 		RTE_LOG(ERR, REORDER, "Memzone allocation failed\n");
 		rte_errno = ENOMEM;
 		rte_free(te);
+		return NULL;
 	} else {
-		rte_reorder_init(b, bufsize, name, size);
+		if (rte_reorder_init(b, bufsize, name, size) == NULL) {
+			rte_free(b);
+			rte_free(te);
+			return NULL;
+		}
 		te->data = (void *)b;
-		TAILQ_INSERT_TAIL(reorder_list, te, next);
 	}
 
-exit:
-	rte_mcfg_tailq_write_unlock();
+	te_inserted = rte_reorder_entry_insert(te);
+	if (te_inserted != te) {
+		rte_free(b);
+		rte_free(te);
+		return te_inserted->data;
+	}
+
 	return b;
 }
 
@@ -392,6 +419,7 @@
 	/* Try to fetch requested number of mbufs from ready buffer */
 	while ((drain_cnt < max_mbufs) && (ready_buf->tail != ready_buf->head)) {
 		mbufs[drain_cnt++] = ready_buf->entries[ready_buf->tail];
+		ready_buf->entries[ready_buf->tail] = NULL;
 		ready_buf->tail = (ready_buf->tail + 1) & ready_buf->mask;
 	}
 
diff -Nru dpdk-20.11.7/lib/librte_reorder/rte_reorder.h dpdk-20.11.8/lib/librte_reorder/rte_reorder.h
--- dpdk-20.11.7/lib/librte_reorder/rte_reorder.h	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/lib/librte_reorder/rte_reorder.h	2023-04-27 18:57:22.000000000 +0100
@@ -81,6 +81,7 @@
  *   The initialized reorder buffer instance, or NULL on error
  *   On error case, rte_errno will be set appropriately:
  *    - EINVAL - invalid parameters
+ *    - ENOMEM - not enough memory to register dynamic field
  */
 struct rte_reorder_buffer *
 rte_reorder_init(struct rte_reorder_buffer *b, unsigned int bufsize,
@@ -115,8 +116,6 @@
  *
  * @param b
  *   reorder buffer instance
- * @return
- *   None
  */
 void
 rte_reorder_free(struct rte_reorder_buffer *b);
diff -Nru dpdk-20.11.7/lib/librte_rib/rte_rib6.h dpdk-20.11.8/lib/librte_rib/rte_rib6.h
--- dpdk-20.11.7/lib/librte_rib/rte_rib6.h	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/lib/librte_rib/rte_rib6.h	2023-04-27 18:57:22.000000000 +0100
@@ -337,8 +337,6 @@
  *
  * @param rib
  *   RIB object handle
- * @return
- *   None
  */
 __rte_experimental
 void
diff -Nru dpdk-20.11.7/lib/librte_rib/rte_rib.h dpdk-20.11.8/lib/librte_rib/rte_rib.h
--- dpdk-20.11.7/lib/librte_rib/rte_rib.h	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/lib/librte_rib/rte_rib.h	2023-04-27 18:57:22.000000000 +0100
@@ -282,8 +282,6 @@
  *
  * @param rib
  *   RIB object handle
- * @return
- *   None
  */
 __rte_experimental
 void
diff -Nru dpdk-20.11.7/lib/librte_ring/rte_ring_elem.h dpdk-20.11.8/lib/librte_ring/rte_ring_elem.h
--- dpdk-20.11.7/lib/librte_ring/rte_ring_elem.h	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/lib/librte_ring/rte_ring_elem.h	2023-04-27 18:57:22.000000000 +0100
@@ -104,6 +104,12 @@
 struct rte_ring *rte_ring_create_elem(const char *name, unsigned int esize,
 			unsigned int count, int socket_id, unsigned int flags);
 
+#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION >= 120000)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wstringop-overflow"
+#pragma GCC diagnostic ignored "-Wstringop-overread"
+#endif
+
 static __rte_always_inline void
 __rte_ring_enqueue_elems_32(struct rte_ring *r, const uint32_t size,
 		uint32_t idx, const void *obj_table, uint32_t n)
@@ -1076,6 +1082,10 @@
 	return 0;
 }
 
+#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION >= 120000)
+#pragma GCC diagnostic pop
+#endif
+
 #ifdef ALLOW_EXPERIMENTAL_API
 #include <rte_ring_peek.h>
 #include <rte_ring_peek_zc.h>
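
The push/ignore/pop pattern above confines the suppression to the inline helpers that GCC 12 mis-diagnoses. The same guard can be reused around other code; a generic sketch using the standard __GNUC__ macro rather than DPDK's GCC_VERSION:

#if defined(__GNUC__) && (__GNUC__ >= 12)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wstringop-overflow"
#pragma GCC diagnostic ignored "-Wstringop-overread"
#endif

/* ... code that triggers the false positives ... */

#if defined(__GNUC__) && (__GNUC__ >= 12)
#pragma GCC diagnostic pop
#endif
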
diff -Nru dpdk-20.11.7/lib/librte_telemetry/telemetry.c dpdk-20.11.8/lib/librte_telemetry/telemetry.c
--- dpdk-20.11.7/lib/librte_telemetry/telemetry.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/lib/librte_telemetry/telemetry.c	2023-04-27 18:57:22.000000000 +0100
@@ -281,7 +281,7 @@
 static void
 perform_command(telemetry_cb fn, const char *cmd, const char *param, int s)
 {
-	struct rte_tel_data data;
+	struct rte_tel_data data = {0};
 
 	int ret = fn(cmd, param, &data);
 	if (ret < 0) {
diff -Nru dpdk-20.11.7/lib/librte_vhost/rte_vhost_async.h dpdk-20.11.8/lib/librte_vhost/rte_vhost_async.h
--- dpdk-20.11.7/lib/librte_vhost/rte_vhost_async.h	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/lib/librte_vhost/rte_vhost_async.h	2023-04-27 18:57:22.000000000 +0100
@@ -48,15 +48,15 @@
 	/**
 	 * instruct async engines to perform copies for a batch of packets
 	 *
-	 * @param vid
+	 *  vid
 	 *  id of vhost device to perform data copies
-	 * @param queue_id
+	 *  queue_id
 	 *  queue id to perform data copies
-	 * @param descs
+	 *  descs
 	 *  an array of DMA transfer memory descriptors
-	 * @param opaque_data
+	 *  opaque_data
 	 *  opaque data pair sending to DMA engine
-	 * @param count
+	 *  count
 	 *  number of elements in the "descs" array
 	 * @return
 	 *  number of descs processed
@@ -67,13 +67,13 @@
 		uint16_t count);
 	/**
 	 * check copy-completed packets from the async engine
-	 * @param vid
+	 *  vid
 	 *  id of vhost device to check copy completion
-	 * @param queue_id
+	 *  queue_id
 	 *  queue id to check copy completion
-	 * @param opaque_data
+	 *  opaque_data
 	 *  buffer to receive the opaque data pair from DMA engine
-	 * @param max_packets
+	 *  max_packets
 	 *  max number of packets could be completed
 	 * @return
 	 *  number of async descs completed
diff -Nru dpdk-20.11.7/lib/librte_vhost/socket.c dpdk-20.11.8/lib/librte_vhost/socket.c
--- dpdk-20.11.7/lib/librte_vhost/socket.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/lib/librte_vhost/socket.c	2023-04-27 18:57:22.000000000 +0100
@@ -128,10 +128,12 @@
 		return ret;
 	}
 
-	if (msgh.msg_flags & (MSG_TRUNC | MSG_CTRUNC)) {
+	if (msgh.msg_flags & MSG_TRUNC)
 		VHOST_LOG_CONFIG(ERR, "truncated msg\n");
-		return -1;
-	}
+
+	/* MSG_CTRUNC may be caused by LSM misconfiguration */
+	if (msgh.msg_flags & MSG_CTRUNC)
+		VHOST_LOG_CONFIG(ERR, "truncated control data (fd %d)\n", sockfd);
 
 	for (cmsg = CMSG_FIRSTHDR(&msgh); cmsg != NULL;
 		cmsg = CMSG_NXTHDR(&msgh, cmsg)) {
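
Splitting the two flags lets a message whose control data was truncated (e.g. SCM_RIGHTS stripped by an LSM) still be processed instead of failing the read. A standalone sketch of the post-recvmsg() check, with a hypothetical helper name:

#include <stdio.h>
#include <sys/socket.h>

static void
log_truncation(const struct msghdr *msgh, int sockfd)
{
	if (msgh->msg_flags & MSG_TRUNC)
		fprintf(stderr, "truncated msg\n");
	/* MSG_CTRUNC alone no longer aborts the read */
	if (msgh->msg_flags & MSG_CTRUNC)
		fprintf(stderr, "truncated control data (fd %d)\n", sockfd);
}
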
diff -Nru dpdk-20.11.7/lib/librte_vhost/vhost.h dpdk-20.11.8/lib/librte_vhost/vhost.h
--- dpdk-20.11.7/lib/librte_vhost/vhost.h	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/lib/librte_vhost/vhost.h	2023-04-27 18:57:22.000000000 +0100
@@ -663,7 +663,10 @@
 static __rte_always_inline struct virtio_net *
 get_device(int vid)
 {
-	struct virtio_net *dev = vhost_devices[vid];
+	struct virtio_net *dev = NULL;
+
+	if (likely(vid >= 0 && vid < MAX_VHOST_DEVICE))
+		dev = vhost_devices[vid];
 
 	if (unlikely(!dev)) {
 		VHOST_LOG_CONFIG(ERR,
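
The fix is the usual validate-before-index pattern for an externally supplied handle; in generic form (all names hypothetical):

struct obj;	/* opaque */

/* Return NULL instead of reading out of bounds on a bad id. */
static inline struct obj *
lookup(struct obj *const *table, int id, int max)
{
	if (id < 0 || id >= max)
		return NULL;
	return table[id];
}
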
diff -Nru dpdk-20.11.7/lib/librte_vhost/vhost_user.c dpdk-20.11.8/lib/librte_vhost/vhost_user.c
--- dpdk-20.11.7/lib/librte_vhost/vhost_user.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/lib/librte_vhost/vhost_user.c	2023-04-27 18:57:22.000000000 +0100
@@ -1679,7 +1679,7 @@
 
 	if (!(msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK))
 		close(msg->fds[0]);
-	VHOST_LOG_CONFIG(INFO, "not implemented\n");
+	VHOST_LOG_CONFIG(DEBUG, "not implemented\n");
 
 	return RTE_VHOST_MSG_RESULT_OK;
 }
@@ -2191,7 +2191,7 @@
 		return RTE_VHOST_MSG_RESULT_ERR;
 
 	close(msg->fds[0]);
-	VHOST_LOG_CONFIG(INFO, "not implemented.\n");
+	VHOST_LOG_CONFIG(DEBUG, "not implemented.\n");
 
 	return RTE_VHOST_MSG_RESULT_OK;
 }
@@ -2609,30 +2609,37 @@
 
 	ret = read_fd_message(sockfd, (char *)msg, VHOST_USER_HDR_SIZE,
 		msg->fds, VHOST_MEMORY_MAX_NREGIONS, &msg->fd_num);
-	if (ret <= 0) {
-		return ret;
-	} else if (ret != VHOST_USER_HDR_SIZE) {
+	if (ret <= 0)
+		goto out;
+
+	if (ret != VHOST_USER_HDR_SIZE) {
 		VHOST_LOG_CONFIG(ERR, "Unexpected header size read\n");
-		close_msg_fds(msg);
-		return -1;
+		ret = -1;
+		goto out;
 	}
 
 	if (msg->size) {
 		if (msg->size > sizeof(msg->payload)) {
 			VHOST_LOG_CONFIG(ERR,
 				"invalid msg size: %d\n", msg->size);
-			return -1;
+			ret = -1;
+			goto out;
 		}
 		ret = read(sockfd, &msg->payload, msg->size);
 		if (ret <= 0)
-			return ret;
+			goto out;
 		if (ret != (int)msg->size) {
 			VHOST_LOG_CONFIG(ERR,
 				"read control message failed\n");
-			return -1;
+			ret = -1;
+			goto out;
 		}
 	}
 
+out:
+	if (ret <= 0)
+		close_msg_fds(msg);
+
 	return ret;
 }
 
@@ -2778,6 +2785,7 @@
 		}
 	}
 
+	msg.request.master = VHOST_USER_NONE;
 	ret = read_vhost_message(fd, &msg);
 	if (ret <= 0) {
 		if (ret < 0)
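
The restructuring funnels every failure path through a single label so any fds attached to the message are always closed, a common C cleanup idiom. Generically (all helper names hypothetical):

static int
read_message(int fd, struct msg *m)
{
	int ret = read_header(fd, m);

	if (ret <= 0)
		goto out;
	ret = read_payload(fd, m);
out:
	if (ret <= 0)
		close_attached_fds(m);	/* single cleanup point */
	return ret;
}
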
diff -Nru dpdk-20.11.7/lib/librte_vhost/virtio_net.c dpdk-20.11.8/lib/librte_vhost/virtio_net.c
--- dpdk-20.11.7/lib/librte_vhost/virtio_net.c	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/lib/librte_vhost/virtio_net.c	2023-04-27 18:57:22.000000000 +0100
@@ -1329,6 +1329,12 @@
 			sizeof(struct virtio_net_hdr_mrg_rxbuf);
 	}
 
+	if (rxvq_is_mergeable(dev)) {
+		vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+			ASSIGN_UNLESS_EQUAL(hdrs[i]->num_buffers, 1);
+		}
+	}
+
 	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
 		virtio_enqueue_offload(pkts[i], &hdrs[i]->hdr);
 
diff -Nru dpdk-20.11.7/VERSION dpdk-20.11.8/VERSION
--- dpdk-20.11.7/VERSION	2022-12-13 10:50:22.000000000 +0000
+++ dpdk-20.11.8/VERSION	2023-04-27 18:57:22.000000000 +0100
@@ -1 +1 @@
-20.11.7
+20.11.8



--- End Message ---
--- Begin Message ---
Package: release.debian.org
Version: 11.8

Hi,

The updates referred to by each of these requests were included in
today's 11.8 bullseye point release.

Regards,

Adam

--- End Message ---
