
Bug#1052402: bullseye-pu: package dpdk/20.11.9-1~deb11u1

Package: release.debian.org
Severity: normal
Tags: bullseye
User: release.debian.org@packages.debian.org
Usertags: pu
X-Debbugs-CC: pkg-dpdk-devel@lists.alioth.debian.org

Dear release team,

We would like to upload a new LTS point release of DPDK to Bullseye.
We have already done this previously for both Buster and Bullseye, so I
have proceeded with the upload to bullseye-pu.

As before, the LTS point release contains only bug fixes with no API
breakage, and it has been validated with regression tests.

The source debdiff is attached.

-- 
Kind regards,
Luca Boccassi
diff -Nru dpdk-20.11.8/app/test/meson.build dpdk-20.11.9/app/test/meson.build
--- dpdk-20.11.8/app/test/meson.build	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/app/test/meson.build	2023-08-15 16:54:57.000000000 +0100
@@ -210,6 +210,7 @@
         ['fib6_autotest', true],
         ['func_reentrancy_autotest', false],
         ['flow_classify_autotest', false],
+        ['graph_autotest', true],
         ['hash_autotest', true],
         ['interrupt_autotest', true],
         ['ipfrag_autotest', false],
@@ -226,6 +227,7 @@
         ['memzone_autotest', false],
         ['meter_autotest', true],
         ['multiprocess_autotest', false],
+        ['node_list_dump', true],
         ['per_lcore_autotest', true],
         ['prefetch_autotest', true],
         ['rcu_qsbr_autotest', true],
@@ -303,6 +305,7 @@
         'hash_readwrite_lf_perf_autotest',
         'trace_perf_autotest',
 	'ipsec_perf_autotest',
+        'graph_perf_autotest',
 ]
 
 driver_test_names = [
diff -Nru dpdk-20.11.8/app/test/test_cryptodev.c dpdk-20.11.9/app/test/test_cryptodev.c
--- dpdk-20.11.8/app/test/test_cryptodev.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/app/test/test_cryptodev.c	2023-08-15 16:54:57.000000000 +0100
@@ -6386,6 +6386,7 @@
 static int
 test_snow3g_decryption_with_digest_test_case_1(void)
 {
+	int ret;
 	struct snow3g_hash_test_data snow3g_hash_data;
 
 	/*
@@ -6394,8 +6395,9 @@
 	 */
 	snow3g_hash_test_vector_setup(&snow3g_test_case_7, &snow3g_hash_data);
 
-	if (test_snow3g_decryption(&snow3g_test_case_7))
-		return TEST_FAILED;
+	ret = test_snow3g_decryption(&snow3g_test_case_7);
+	if (ret != 0)
+		return ret;
 
 	return test_snow3g_authentication_verify(&snow3g_hash_data);
 }
@@ -7709,7 +7711,7 @@
 			tdata->key.data, tdata->key.len,
 			tdata->aad.len, tdata->auth_tag.len,
 			tdata->iv.len);
-	if (retval < 0)
+	if (retval != TEST_SUCCESS)
 		return retval;
 
 	if (tdata->aad.len > MBUF_SIZE) {
@@ -9762,7 +9764,7 @@
 			tdata->key.data, tdata->key.len,
 			tdata->aad.len, tdata->auth_tag.len,
 			tdata->iv.len);
-	if (retval < 0)
+	if (retval != TEST_SUCCESS)
 		return retval;
 
 	/* alloc mbuf and set payload */
diff -Nru dpdk-20.11.8/app/test/test_cryptodev_security_pdcp_sdap_test_vectors.h dpdk-20.11.9/app/test/test_cryptodev_security_pdcp_sdap_test_vectors.h
--- dpdk-20.11.8/app/test/test_cryptodev_security_pdcp_sdap_test_vectors.h	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/app/test/test_cryptodev_security_pdcp_sdap_test_vectors.h	2023-08-15 16:54:57.000000000 +0100
@@ -769,7 +769,7 @@
 					 0x0a, 0x98, 0xc1, 0x3c, 0x98, 0x82,
 					 0xdc, 0xb6, 0xc2, 0x36 },
 		.data_in =
-			(uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d,
+			(uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d,
 				     0x3f, 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde,
 				     0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb,
 				     0xeb, 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf,
@@ -781,7 +781,7 @@
 				     0x7a, 0xf9, 0xdd },
 		.in_len = 66,
 		.data_out =
-			(uint8_t[]){ 0x50, 0x01, 0x86, 0x69, 0xf2, 0x5d, 0xd7,
+			(uint8_t[]){ 0x80, 0x01, 0x86, 0x69, 0xf2, 0x5d, 0xd7,
 				     0xf5, 0xc1, 0xf7, 0x1e, 0x47, 0x5d, 0xce,
 				     0xbe, 0x48, 0xb5, 0x0b, 0x6a, 0x73, 0x9a,
 				     0x5a, 0xa3, 0x06, 0x47, 0x40, 0x96, 0xcf,
@@ -790,7 +790,7 @@
 				     0xc9, 0x7f, 0x11, 0x79, 0x24, 0x44, 0xe8,
 				     0x39, 0x11, 0x03, 0x0a, 0x9d, 0x4f, 0xe2,
 				     0x95, 0x9f, 0x47, 0x73, 0x37, 0x83, 0x8c,
-				     0x23, 0xfa, 0x16, 0x39, 0xf7, 0x15, 0x11 },
+				     0x23, 0xfa, 0x16, 0xb2, 0xb0, 0x17, 0x4a },
 		.sn_size = 12,
 		.hfn = 0x1,
 		.hfn_threshold = 0xfa558,
@@ -817,7 +817,7 @@
 					 0x0a, 0x98, 0xc1, 0x3c, 0x98, 0x82,
 					 0xdc, 0xb6, 0xc2, 0x36 },
 		.data_in =
-			(uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d,
+			(uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d,
 				     0x3f, 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde,
 				     0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb,
 				     0xeb, 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf,
@@ -829,7 +829,7 @@
 				     0x7a, 0xf9, 0xdd },
 		.in_len = 66,
 		.data_out =
-			(uint8_t[]){ 0x50, 0x01, 0x86, 0x22, 0x2a, 0x8f, 0x86,
+			(uint8_t[]){ 0x80, 0x01, 0x86, 0x22, 0x2a, 0x8f, 0x86,
 				     0x25, 0x92, 0xcd, 0xa9, 0xa1, 0xa0, 0xf5,
 				     0x86, 0x0d, 0xe8, 0xe4, 0xef, 0xe0, 0x78,
 				     0x45, 0x7c, 0x0c, 0x41, 0x5c, 0x8f, 0x07,
@@ -838,7 +838,7 @@
 				     0x35, 0xff, 0x7a, 0x42, 0xac, 0x94, 0x21,
 				     0x60, 0x1c, 0x13, 0xcc, 0x7e, 0x6b, 0x2f,
 				     0x6f, 0x91, 0x89, 0xc6, 0xd4, 0xe6, 0x17,
-				     0xae, 0xde, 0xfb, 0x90, 0x62, 0x59, 0xcb },
+				     0xae, 0xde, 0xfb, 0x19, 0xDa, 0x9a, 0xc2 },
 		.sn_size = 12,
 		.hfn = 0x1,
 		.hfn_threshold = 0xfa558,
@@ -865,7 +865,7 @@
 					 0x0a, 0x98, 0xc1, 0x3c, 0x98, 0x82,
 					 0xdc, 0xb6, 0xc2, 0x36 },
 		.data_in =
-			(uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d,
+			(uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d,
 				     0x3f, 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde,
 				     0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb,
 				     0xeb, 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf,
@@ -877,7 +877,7 @@
 				     0x7a, 0xf9, 0xdd },
 		.in_len = 66,
 		.data_out =
-			(uint8_t[]){ 0x50, 0x01, 0x86, 0x69, 0xf2, 0x5d, 0xd7,
+			(uint8_t[]){ 0x80, 0x01, 0x86, 0x69, 0xf2, 0x5d, 0xd7,
 				     0xf5, 0xc1, 0xf7, 0x1e, 0x47, 0x5d, 0xce,
 				     0xbe, 0x48, 0xb5, 0x0b, 0x6a, 0x73, 0x9a,
 				     0x5a, 0xa3, 0x06, 0x47, 0x40, 0x96, 0xcf,
@@ -886,7 +886,7 @@
 				     0xc9, 0x7f, 0x11, 0x79, 0x24, 0x44, 0xe8,
 				     0x39, 0x11, 0x03, 0x0a, 0x9d, 0x4f, 0xe2,
 				     0x95, 0x9f, 0x47, 0x73, 0x37, 0x83, 0x8c,
-				     0x23, 0xfa, 0x16, 0x72, 0x3e, 0x14, 0xa9 },
+				     0x23, 0xfa, 0x16, 0x6c, 0xcb, 0x92, 0xdf },
 		.sn_size = 12,
 		.hfn = 0x1,
 		.hfn_threshold = 0xfa558,
@@ -913,7 +913,7 @@
 					 0x0a, 0x98, 0xc1, 0x3c, 0x98, 0x82,
 					 0xdc, 0xb6, 0xc2, 0x36 },
 		.data_in =
-			(uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d,
+			(uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d,
 				     0x3f, 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde,
 				     0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb,
 				     0xeb, 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf,
@@ -925,7 +925,7 @@
 				     0x7a, 0xf9, 0xdd },
 		.in_len = 66,
 		.data_out =
-			(uint8_t[]){ 0x50, 0x01, 0x86, 0x22, 0x2a, 0x8f, 0x86,
+			(uint8_t[]){ 0x80, 0x01, 0x86, 0x22, 0x2a, 0x8f, 0x86,
 				     0x25, 0x92, 0xcd, 0xa9, 0xa1, 0xa0, 0xf5,
 				     0x86, 0x0d, 0xe8, 0xe4, 0xef, 0xe0, 0x78,
 				     0x45, 0x7c, 0x0c, 0x41, 0x5c, 0x8f, 0x07,
@@ -934,7 +934,7 @@
 				     0x35, 0xff, 0x7a, 0x42, 0xac, 0x94, 0x21,
 				     0x60, 0x1c, 0x13, 0xcc, 0x7e, 0x6b, 0x2f,
 				     0x6f, 0x91, 0x89, 0xc6, 0xd4, 0xe6, 0x17,
-				     0xae, 0xde, 0xfb, 0x3f, 0x47, 0xaa, 0x9b },
+				     0xae, 0xde, 0xfb, 0x5b, 0xc2, 0x9f, 0x29 },
 		.sn_size = 12,
 		.hfn = 0x1,
 		.hfn_threshold = 0xfa558,
@@ -961,7 +961,7 @@
 					 0x0a, 0x98, 0xc1, 0x3c, 0x98, 0x82,
 					 0xdc, 0xb6, 0xc2, 0x36 },
 		.data_in =
-			(uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d,
+			(uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d,
 				     0x3f, 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde,
 				     0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb,
 				     0xeb, 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf,
@@ -974,7 +974,7 @@
 		.in_len = 66,
 		.data_out =
 			(uint8_t[]){
-				0x50, 0x01, 0x86, 0x69, 0xf2, 0x5d, 0xd7, 0xf5,
+				0x80, 0x01, 0x86, 0x69, 0xf2, 0x5d, 0xd7, 0xf5,
 				0xc1, 0xf7, 0x1e, 0x47, 0x5d, 0xce, 0xbe, 0x48,
 				0xb5, 0x0b, 0x6a, 0x73, 0x9a, 0x5a, 0xa3, 0x06,
 				0x47, 0x40, 0x96, 0xcf, 0x86, 0x98, 0x3d, 0x6f,
@@ -982,7 +982,7 @@
 				0xa6, 0x24, 0xc9, 0x7f, 0x11, 0x79, 0x24, 0x44,
 				0xe8, 0x39, 0x11, 0x03, 0x0a, 0x9d, 0x4f, 0xe2,
 				0x95, 0x9f, 0x47, 0x73, 0x37, 0x83, 0x8c, 0x23,
-				0xfa, 0x16, 0x52, 0x69, 0x16, 0xfc,
+				0xfa, 0x16, 0x5d, 0x83, 0x73, 0x34,
 			},
 		.sn_size = 12,
 		.hfn = 0x1,
@@ -1010,7 +1010,7 @@
 					 0x0a, 0x98, 0xc1, 0x3c, 0x98, 0x82,
 					 0xdc, 0xb6, 0xc2, 0x36 },
 		.data_in =
-			(uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d,
+			(uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d,
 				     0x3f, 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde,
 				     0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb,
 				     0xeb, 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf,
@@ -1022,7 +1022,7 @@
 				     0x7a, 0xf9, 0xdd },
 		.in_len = 66,
 		.data_out =
-			(uint8_t[]){ 0x50, 0x01, 0x86, 0x22, 0x2a, 0x8f, 0x86,
+			(uint8_t[]){ 0x80, 0x01, 0x86, 0x22, 0x2a, 0x8f, 0x86,
 				     0x25, 0x92, 0xcd, 0xa9, 0xa1, 0xa0, 0xf5,
 				     0x86, 0x0d, 0xe8, 0xe4, 0xef, 0xe0, 0x78,
 				     0x45, 0x7c, 0x0c, 0x41, 0x5c, 0x8f, 0x07,
@@ -1031,7 +1031,7 @@
 				     0x35, 0xff, 0x7a, 0x42, 0xac, 0x94, 0x21,
 				     0x60, 0x1c, 0x13, 0xcc, 0x7e, 0x6b, 0x2f,
 				     0x6f, 0x91, 0x89, 0xc6, 0xd4, 0xe6, 0x17,
-				     0xae, 0xde, 0xfb, 0xf5, 0xda, 0x73, 0xa7 },
+				     0xae, 0xde, 0xfb, 0xff, 0xf9, 0xef, 0xff },
 		.sn_size = 12,
 		.hfn = 0x1,
 		.hfn_threshold = 0xfa558,
@@ -1154,7 +1154,7 @@
 					 0x0a, 0x98, 0xc1, 0x3c, 0x98, 0x82,
 					 0xdc, 0xb6, 0xc2, 0x36 },
 		.data_in =
-			(uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d,
+			(uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d,
 				     0x3f, 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde,
 				     0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb,
 				     0xeb, 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf,
@@ -1166,7 +1166,7 @@
 				     0x7a, 0xf9, 0xdd },
 		.in_len = 66,
 		.data_out =
-			(uint8_t[]){ 0x50, 0x01, 0x86, 0xe8, 0x8c, 0x96, 0x38,
+			(uint8_t[]){ 0x80, 0x01, 0x86, 0xe8, 0x8c, 0x96, 0x38,
 				     0x6e, 0xe4, 0x1f, 0xb9, 0x85, 0x61, 0x2e,
 				     0x64, 0x31, 0x22, 0x97, 0x5b, 0xf6, 0x40,
 				     0x08, 0x65, 0xc3, 0xfa, 0x72, 0xd2, 0x22,
@@ -1175,7 +1175,7 @@
 				     0x23, 0x80, 0xcc, 0x5f, 0xfd, 0x74, 0xb1,
 				     0xfb, 0xe1, 0xea, 0x6f, 0xf9, 0x9d, 0xf3,
 				     0x90, 0x02, 0x6e, 0xf1, 0xf8, 0x95, 0x97,
-				     0xbd, 0xba, 0x08, 0x39, 0x63, 0x21, 0x82 },
+				     0xbd, 0xba, 0x08, 0xb2, 0x24, 0x23, 0xd9 },
 		.sn_size = 12,
 		.hfn = 0x1,
 		.hfn_threshold = 0xfa558,
@@ -1202,7 +1202,7 @@
 					 0x0a, 0x98, 0xc1, 0x3c, 0x98, 0x82,
 					 0xdc, 0xb6, 0xc2, 0x36 },
 		.data_in =
-			(uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d,
+			(uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d,
 				     0x3f, 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde,
 				     0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb,
 				     0xeb, 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf,
@@ -1214,7 +1214,7 @@
 				     0x7a, 0xf9, 0xdd },
 		.in_len = 66,
 		.data_out =
-			(uint8_t[]){ 0x50, 0x01, 0x86, 0xd2, 0xc0, 0x3a, 0x41,
+			(uint8_t[]){ 0x80, 0x01, 0x86, 0xd2, 0xc0, 0x3a, 0x41,
 				     0xbc, 0x0b, 0x95, 0xa4, 0x57, 0x3e, 0x36,
 				     0x10, 0xb2, 0xff, 0x48, 0x1f, 0x10, 0xa8,
 				     0x06, 0x42, 0xc5, 0xa2, 0x33, 0xed, 0x2e,
@@ -1223,7 +1223,7 @@
 				     0x35, 0xd2, 0x31, 0x1a, 0xb8, 0x33, 0xc7,
 				     0x9c, 0xa4, 0xa4, 0x9b, 0x20, 0xc6, 0x47,
 				     0xdd, 0xe5, 0xa3, 0x4b, 0x9e, 0x66, 0xde,
-				     0xc9, 0x0a, 0x64, 0x8e, 0x79, 0xde, 0xaa },
+				     0xc9, 0x0a, 0x64, 0x07, 0xc1, 0x1d, 0xa3 },
 		.sn_size = 12,
 		.hfn = 0x1,
 		.hfn_threshold = 0xfa558,
@@ -1250,7 +1250,7 @@
 					 0x0a, 0x98, 0xc1, 0x3c, 0x98, 0x82,
 					 0xdc, 0xb6, 0xc2, 0x36 },
 		.data_in =
-			(uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d,
+			(uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d,
 				     0x3f, 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde,
 				     0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb,
 				     0xeb, 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf,
@@ -1262,7 +1262,7 @@
 				     0x7a, 0xf9, 0xdd },
 		.in_len = 66,
 		.data_out =
-			(uint8_t[]){ 0x50, 0x01, 0x86, 0xe8, 0x8c, 0x96, 0x38,
+			(uint8_t[]){ 0x80, 0x01, 0x86, 0xe8, 0x8c, 0x96, 0x38,
 				     0x6e, 0xe4, 0x1f, 0xb9, 0x85, 0x61, 0x2e,
 				     0x64, 0x31, 0x22, 0x97, 0x5b, 0xf6, 0x40,
 				     0x08, 0x65, 0xc3, 0xfa, 0x72, 0xd2, 0x22,
@@ -1271,7 +1271,7 @@
 				     0x23, 0x80, 0xcc, 0x5f, 0xfd, 0x74, 0xb1,
 				     0xfb, 0xe1, 0xea, 0x6f, 0xf9, 0x9d, 0xf3,
 				     0x90, 0x02, 0x6e, 0xf1, 0xf8, 0x95, 0x97,
-				     0xbd, 0xba, 0x08, 0x72, 0xaa, 0x20, 0x3a },
+				     0xbd, 0xba, 0x08, 0x6c, 0x5f, 0xa6, 0x4c },
 		.sn_size = 12,
 		.hfn = 0x1,
 		.hfn_threshold = 0xfa558,
@@ -1298,7 +1298,7 @@
 					 0x0a, 0x98, 0xc1, 0x3c, 0x98, 0x82,
 					 0xdc, 0xb6, 0xc2, 0x36 },
 		.data_in =
-			(uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d,
+			(uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d,
 				     0x3f, 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde,
 				     0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb,
 				     0xeb, 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf,
@@ -1310,7 +1310,7 @@
 				     0x7a, 0xf9, 0xdd },
 		.in_len = 66,
 		.data_out =
-			(uint8_t[]){ 0x50, 0x01, 0x86, 0xd2, 0xc0, 0x3a, 0x41,
+			(uint8_t[]){ 0x80, 0x01, 0x86, 0xd2, 0xc0, 0x3a, 0x41,
 				     0xbc, 0x0b, 0x95, 0xa4, 0x57, 0x3e, 0x36,
 				     0x10, 0xb2, 0xff, 0x48, 0x1f, 0x10, 0xa8,
 				     0x06, 0x42, 0xc5, 0xa2, 0x33, 0xed, 0x2e,
@@ -1319,7 +1319,7 @@
 				     0x35, 0xd2, 0x31, 0x1a, 0xb8, 0x33, 0xc7,
 				     0x9c, 0xa4, 0xa4, 0x9b, 0x20, 0xc6, 0x47,
 				     0xdd, 0xe5, 0xa3, 0x4b, 0x9e, 0x66, 0xde,
-				     0xc9, 0x0a, 0x64, 0x21, 0x5c, 0x2d, 0xfa },
+				     0xc9, 0x0a, 0x64, 0x45, 0xd9, 0x18, 0x48 },
 		.sn_size = 12,
 		.hfn = 0x1,
 		.hfn_threshold = 0xfa558,
@@ -1346,7 +1346,7 @@
 					 0x0a, 0x98, 0xc1, 0x3c, 0x98, 0x82,
 					 0xdc, 0xb6, 0xc2, 0x36 },
 		.data_in =
-			(uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d,
+			(uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d,
 				     0x3f, 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde,
 				     0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb,
 				     0xeb, 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf,
@@ -1358,7 +1358,7 @@
 				     0x7a, 0xf9, 0xdd },
 		.in_len = 66,
 		.data_out =
-			(uint8_t[]){ 0x50, 0x01, 0x86, 0xe8, 0x8c, 0x96, 0x38,
+			(uint8_t[]){ 0x80, 0x01, 0x86, 0xe8, 0x8c, 0x96, 0x38,
 				     0x6e, 0xe4, 0x1f, 0xb9, 0x85, 0x61, 0x2e,
 				     0x64, 0x31, 0x22, 0x97, 0x5b, 0xf6, 0x40,
 				     0x08, 0x65, 0xc3, 0xfa, 0x72, 0xd2, 0x22,
@@ -1367,7 +1367,7 @@
 				     0x23, 0x80, 0xcc, 0x5f, 0xfd, 0x74, 0xb1,
 				     0xfb, 0xe1, 0xea, 0x6f, 0xf9, 0x9d, 0xf3,
 				     0x90, 0x02, 0x6e, 0xf1, 0xf8, 0x95, 0x97,
-				     0xbd, 0xba, 0x08, 0x52, 0xfd, 0x22, 0x6f },
+				     0xbd, 0xba, 0x08, 0x5d, 0x17, 0x47, 0xa7 },
 		.sn_size = 12,
 		.hfn = 0x1,
 		.hfn_threshold = 0xfa558,
@@ -1394,7 +1394,7 @@
 					 0x0a, 0x98, 0xc1, 0x3c, 0x98, 0x82,
 					 0xdc, 0xb6, 0xc2, 0x36 },
 		.data_in =
-			(uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d,
+			(uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d,
 				     0x3f, 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde,
 				     0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb,
 				     0xeb, 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf,
@@ -1406,7 +1406,7 @@
 				     0x7a, 0xf9, 0xdd },
 		.in_len = 66,
 		.data_out =
-			(uint8_t[]){ 0x50, 0x01, 0x86, 0xd2, 0xc0, 0x3a, 0x41,
+			(uint8_t[]){ 0x80, 0x01, 0x86, 0xd2, 0xc0, 0x3a, 0x41,
 				     0xbc, 0x0b, 0x95, 0xa4, 0x57, 0x3e, 0x36,
 				     0x10, 0xb2, 0xff, 0x48, 0x1f, 0x10, 0xa8,
 				     0x06, 0x42, 0xc5, 0xa2, 0x33, 0xed, 0x2e,
@@ -1415,7 +1415,7 @@
 				     0x35, 0xd2, 0x31, 0x1a, 0xb8, 0x33, 0xc7,
 				     0x9c, 0xa4, 0xa4, 0x9b, 0x20, 0xc6, 0x47,
 				     0xdd, 0xe5, 0xa3, 0x4b, 0x9e, 0x66, 0xde,
-				     0xc9, 0x0a, 0x64, 0xeb, 0xc1, 0xf4, 0xc6 },
+				     0xc9, 0x0a, 0x64, 0xe1, 0xe2, 0x68, 0x9e },
 		.sn_size = 12,
 		.hfn = 0x1,
 		.hfn_threshold = 0xfa558,
@@ -1538,7 +1538,7 @@
 					 0x0a, 0x98, 0xc1, 0x3c, 0x98, 0x82,
 					 0xdc, 0xb6, 0xc2, 0x36 },
 		.data_in =
-			(uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d,
+			(uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d,
 				     0x3f, 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde,
 				     0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb,
 				     0xeb, 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf,
@@ -1550,7 +1550,7 @@
 				     0x7a, 0xf9, 0xdd },
 		.in_len = 66,
 		.data_out =
-			(uint8_t[]){ 0x50, 0x01, 0x86, 0x79, 0xdb, 0x02, 0x27,
+			(uint8_t[]){ 0x80, 0x01, 0x86, 0x79, 0xdb, 0x02, 0x27,
 				     0xa0, 0x85, 0x82, 0x87, 0x6b, 0x35, 0x2b,
 				     0xe1, 0x96, 0x5b, 0xfd, 0x67, 0xe1, 0x67,
 				     0x69, 0x17, 0x5b, 0x0a, 0x8f, 0xb8, 0x37,
@@ -1559,7 +1559,7 @@
 				     0xda, 0x94, 0x61, 0x8e, 0x2d, 0x7a, 0x5d,
 				     0x64, 0x0f, 0x4a, 0xc7, 0x7c, 0x9d, 0x56,
 				     0x5a, 0x2a, 0xcb, 0xff, 0x79, 0x07, 0xa5,
-				     0xaf, 0x96, 0x5c, 0xb6, 0x6c, 0xeb, 0x14 },
+				     0xaf, 0x96, 0x5c, 0x3d, 0x2b, 0xe9, 0x4f },
 		.sn_size = 12,
 		.hfn = 0x1,
 		.hfn_threshold = 0xfa558,
@@ -1586,7 +1586,7 @@
 					 0x0a, 0x98, 0xc1, 0x3c, 0x98, 0x82,
 					 0xdc, 0xb6, 0xc2, 0x36 },
 		.data_in =
-			(uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d,
+			(uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d,
 				     0x3f, 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde,
 				     0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb,
 				     0xeb, 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf,
@@ -1598,7 +1598,7 @@
 				     0x7a, 0xf9, 0xdd },
 		.in_len = 66,
 		.data_out =
-			(uint8_t[]){ 0x50, 0x01, 0x86, 0x2c, 0x0f, 0xc1, 0x08,
+			(uint8_t[]){ 0x80, 0x01, 0x86, 0x2c, 0x0f, 0xc1, 0x08,
 				     0x2e, 0xa7, 0x97, 0xd2, 0x6c, 0x17, 0x8f,
 				     0x22, 0x1f, 0x99, 0x1b, 0x48, 0x04, 0x29,
 				     0x54, 0x3f, 0x07, 0x01, 0xda, 0x19, 0x59,
@@ -1607,7 +1607,7 @@
 				     0x04, 0xf4, 0xfe, 0xe6, 0xf7, 0x53, 0x51,
 				     0x70, 0x10, 0x56, 0x83, 0x59, 0xb5, 0x64,
 				     0x0b, 0xa6, 0x2e, 0xa5, 0x03, 0x62, 0x13,
-				     0x91, 0xaf, 0x24, 0xb2, 0x82, 0xfb, 0x27 },
+				     0x91, 0xaf, 0x24, 0x3b, 0x3a, 0x38, 0x2e },
 		.sn_size = 12,
 		.hfn = 0x1,
 		.hfn_threshold = 0xfa558,
@@ -1634,7 +1634,7 @@
 					 0x0a, 0x98, 0xc1, 0x3c, 0x98, 0x82,
 					 0xdc, 0xb6, 0xc2, 0x36 },
 		.data_in =
-			(uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d,
+			(uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d,
 				     0x3f, 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde,
 				     0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb,
 				     0xeb, 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf,
@@ -1646,7 +1646,7 @@
 				     0x7a, 0xf9, 0xdd },
 		.in_len = 66,
 		.data_out =
-			(uint8_t[]){ 0x50, 0x01, 0x86, 0x79, 0xdb, 0x02, 0x27,
+			(uint8_t[]){ 0x80, 0x01, 0x86, 0x79, 0xdb, 0x02, 0x27,
 				     0xa0, 0x85, 0x82, 0x87, 0x6b, 0x35, 0x2b,
 				     0xe1, 0x96, 0x5b, 0xfd, 0x67, 0xe1, 0x67,
 				     0x69, 0x17, 0x5b, 0x0a, 0x8f, 0xb8, 0x37,
@@ -1655,7 +1655,7 @@
 				     0xda, 0x94, 0x61, 0x8e, 0x2d, 0x7a, 0x5d,
 				     0x64, 0x0f, 0x4a, 0xc7, 0x7c, 0x9d, 0x56,
 				     0x5a, 0x2a, 0xcb, 0xff, 0x79, 0x07, 0xa5,
-				     0xaf, 0x96, 0x5c, 0xfd, 0xa5, 0xea, 0xac },
+				     0xaf, 0x96, 0x5c, 0xe3, 0x50, 0x6c, 0xda },
 		.sn_size = 12,
 		.hfn = 0x1,
 		.hfn_threshold = 0xfa558,
@@ -1682,7 +1682,7 @@
 					 0x0a, 0x98, 0xc1, 0x3c, 0x98, 0x82,
 					 0xdc, 0xb6, 0xc2, 0x36 },
 		.data_in =
-			(uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d,
+			(uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d,
 				     0x3f, 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde,
 				     0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb,
 				     0xeb, 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf,
@@ -1694,7 +1694,7 @@
 				     0x7a, 0xf9, 0xdd },
 		.in_len = 66,
 		.data_out =
-			(uint8_t[]){ 0x50, 0x01, 0x86, 0x2c, 0x0f, 0xc1, 0x08,
+			(uint8_t[]){ 0x80, 0x01, 0x86, 0x2c, 0x0f, 0xc1, 0x08,
 				     0x2e, 0xa7, 0x97, 0xd2, 0x6c, 0x17, 0x8f,
 				     0x22, 0x1f, 0x99, 0x1b, 0x48, 0x04, 0x29,
 				     0x54, 0x3f, 0x07, 0x01, 0xda, 0x19, 0x59,
@@ -1703,7 +1703,7 @@
 				     0x04, 0xf4, 0xfe, 0xe6, 0xf7, 0x53, 0x51,
 				     0x70, 0x10, 0x56, 0x83, 0x59, 0xb5, 0x64,
 				     0x0b, 0xa6, 0x2e, 0xa5, 0x03, 0x62, 0x13,
-				     0x91, 0xaf, 0x24, 0x1d, 0xa7, 0x08, 0x77 },
+				     0x91, 0xaf, 0x24, 0x79, 0x22, 0x3d, 0xc5 },
 		.sn_size = 12,
 		.hfn = 0x1,
 		.hfn_threshold = 0xfa558,
@@ -1730,7 +1730,7 @@
 					 0x0a, 0x98, 0xc1, 0x3c, 0x98, 0x82,
 					 0xdc, 0xb6, 0xc2, 0x36 },
 		.data_in =
-			(uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d,
+			(uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d,
 				     0x3f, 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde,
 				     0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb,
 				     0xeb, 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf,
@@ -1742,7 +1742,7 @@
 				     0x7a, 0xf9, 0xdd },
 		.in_len = 66,
 		.data_out =
-			(uint8_t[]){ 0x50, 0x01, 0x86, 0x79, 0xdb, 0x02, 0x27,
+			(uint8_t[]){ 0x80, 0x01, 0x86, 0x79, 0xdb, 0x02, 0x27,
 				     0xa0, 0x85, 0x82, 0x87, 0x6b, 0x35, 0x2b,
 				     0xe1, 0x96, 0x5b, 0xfd, 0x67, 0xe1, 0x67,
 				     0x69, 0x17, 0x5b, 0x0a, 0x8f, 0xb8, 0x37,
@@ -1751,7 +1751,7 @@
 				     0xda, 0x94, 0x61, 0x8e, 0x2d, 0x7a, 0x5d,
 				     0x64, 0x0f, 0x4a, 0xc7, 0x7c, 0x9d, 0x56,
 				     0x5a, 0x2a, 0xcb, 0xff, 0x79, 0x07, 0xa5,
-				     0xaf, 0x96, 0x5c, 0xdd, 0xf2, 0xe8, 0xf9 },
+				     0xaf, 0x96, 0x5c, 0xd2, 0x18, 0x8d, 0x31 },
 		.sn_size = 12,
 		.hfn = 0x1,
 		.hfn_threshold = 0xfa558,
@@ -1778,7 +1778,7 @@
 					 0x0a, 0x98, 0xc1, 0x3c, 0x98, 0x82,
 					 0xdc, 0xb6, 0xc2, 0x36 },
 		.data_in =
-			(uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d,
+			(uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d,
 				     0x3f, 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde,
 				     0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb,
 				     0xeb, 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf,
@@ -1790,7 +1790,7 @@
 				     0x7a, 0xf9, 0xdd },
 		.in_len = 66,
 		.data_out =
-			(uint8_t[]){ 0x50, 0x01, 0x86, 0x2c, 0x0f, 0xc1, 0x08,
+			(uint8_t[]){ 0x80, 0x01, 0x86, 0x2c, 0x0f, 0xc1, 0x08,
 				     0x2e, 0xa7, 0x97, 0xd2, 0x6c, 0x17, 0x8f,
 				     0x22, 0x1f, 0x99, 0x1b, 0x48, 0x04, 0x29,
 				     0x54, 0x3f, 0x07, 0x01, 0xda, 0x19, 0x59,
@@ -1799,7 +1799,7 @@
 				     0x04, 0xf4, 0xfe, 0xe6, 0xf7, 0x53, 0x51,
 				     0x70, 0x10, 0x56, 0x83, 0x59, 0xb5, 0x64,
 				     0x0b, 0xa6, 0x2e, 0xa5, 0x03, 0x62, 0x13,
-				     0x91, 0xaf, 0x24, 0xd7, 0x3a, 0xd1, 0x4b },
+				     0x91, 0xaf, 0x24, 0xdd, 0x19, 0x4d, 0x13 },
 		.sn_size = 12,
 		.hfn = 0x1,
 		.hfn_threshold = 0xfa558,
@@ -2556,7 +2556,7 @@
 					 0x4b, 0x32, 0x87, 0xf9, 0xdb, 0xe0,
 					 0x31, 0x5f, 0x3a, 0x15 },
 		.data_in =
-			(uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f,
+			(uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f,
 				     0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88,
 				     0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb,
 				     0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f,
@@ -2568,7 +2568,7 @@
 				     0xf9, 0xdd, 0xcc, 0x69 },
 		.in_len = 67,
 		.data_out =
-			(uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0x78, 0xee, 0x01,
+			(uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0x78, 0xee, 0x01,
 				     0x4c, 0x79, 0xc7, 0x14, 0x79, 0x2c, 0x1b,
 				     0x8a, 0xd0, 0x17, 0xb4, 0xb4, 0xfe, 0xa7,
 				     0x17, 0x9a, 0x93, 0x5d, 0xad, 0x27, 0x8a,
@@ -2577,8 +2577,8 @@
 				     0x37, 0xfb, 0x98, 0x1b, 0x06, 0x1a, 0x76,
 				     0xf8, 0x07, 0x72, 0xe9, 0xa2, 0x89, 0x47,
 				     0x95, 0x8f, 0x8f, 0x4e, 0x45, 0x55, 0xd6,
-				     0x0e, 0xf4, 0xe7, 0xe8, 0x78, 0xdd, 0xc1,
-				     0x92 },
+				     0x0e, 0xf4, 0xe7, 0xe8, 0xc0, 0x48, 0x6a,
+				     0x7c },
 		.sn_size = 18,
 		.hfn = 0x1,
 		.hfn_threshold = 0xfa558,
@@ -2605,7 +2605,7 @@
 					 0x4b, 0x32, 0x87, 0xf9, 0xdb, 0xe0,
 					 0x31, 0x5f, 0x3a, 0x15 },
 		.data_in =
-			(uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f,
+			(uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f,
 				     0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88,
 				     0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb,
 				     0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f,
@@ -2617,7 +2617,7 @@
 				     0xf9, 0xdd, 0xcc, 0x69 },
 		.in_len = 67,
 		.data_out =
-			(uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xbf, 0x21, 0xc9,
+			(uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xbf, 0x21, 0xc9,
 				     0x7d, 0x02, 0x8d, 0xf7, 0xf7, 0x80, 0x50,
 				     0x60, 0x32, 0x09, 0xb8, 0x69, 0x48, 0xab,
 				     0x58, 0xf0, 0xd9, 0x63, 0x63, 0x36, 0x01,
@@ -2626,8 +2626,8 @@
 				     0x4c, 0x00, 0xd0, 0x0e, 0x31, 0x1c, 0x87,
 				     0xee, 0x1c, 0xc3, 0x81, 0xb4, 0xb6, 0x09,
 				     0xb5, 0x3c, 0x85, 0xb9, 0xcc, 0x2d, 0x2e,
-				     0x9c, 0x85, 0x0b, 0xf7, 0xb1, 0x80, 0x30,
-				     0xa5 },
+				     0x9c, 0x85, 0x0b, 0xf7, 0x17, 0x28, 0x0f,
+				     0x7d },
 		.sn_size = 18,
 		.hfn = 0x1,
 		.hfn_threshold = 0xfa558,
@@ -2654,7 +2654,7 @@
 					 0x4b, 0x32, 0x87, 0xf9, 0xdb, 0xe0,
 					 0x31, 0x5f, 0x3a, 0x15 },
 		.data_in =
-			(uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f,
+			(uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f,
 				     0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88,
 				     0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb,
 				     0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f,
@@ -2666,7 +2666,7 @@
 				     0xf9, 0xdd, 0xcc, 0x69 },
 		.in_len = 67,
 		.data_out =
-			(uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0x78, 0xee, 0x01,
+			(uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0x78, 0xee, 0x01,
 				     0x4c, 0x79, 0xc7, 0x14, 0x79, 0x2c, 0x1b,
 				     0x8a, 0xd0, 0x17, 0xb4, 0xb4, 0xfe, 0xa7,
 				     0x17, 0x9a, 0x93, 0x5d, 0xad, 0x27, 0x8a,
@@ -2675,8 +2675,8 @@
 				     0x37, 0xfb, 0x98, 0x1b, 0x06, 0x1a, 0x76,
 				     0xf8, 0x07, 0x72, 0xe9, 0xa2, 0x89, 0x47,
 				     0x95, 0x8f, 0x8f, 0x4e, 0x45, 0x55, 0xd6,
-				     0x0e, 0xf4, 0xe7, 0xe8, 0x08, 0xa6, 0xdb,
-				     0x19 },
+				     0x0e, 0xf4, 0xe7, 0xe8, 0x8e, 0x76, 0x4a,
+				     0x4e },
 		.sn_size = 18,
 		.hfn = 0x1,
 		.hfn_threshold = 0xfa558,
@@ -2703,7 +2703,7 @@
 					 0x4b, 0x32, 0x87, 0xf9, 0xdb, 0xe0,
 					 0x31, 0x5f, 0x3a, 0x15 },
 		.data_in =
-			(uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f,
+			(uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f,
 				     0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88,
 				     0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb,
 				     0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f,
@@ -2715,7 +2715,7 @@
 				     0xf9, 0xdd, 0xcc, 0x69 },
 		.in_len = 67,
 		.data_out =
-			(uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xbf, 0x21, 0xc9,
+			(uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xbf, 0x21, 0xc9,
 				     0x7d, 0x02, 0x8d, 0xf7, 0xf7, 0x80, 0x50,
 				     0x60, 0x32, 0x09, 0xb8, 0x69, 0x48, 0xab,
 				     0x58, 0xf0, 0xd9, 0x63, 0x63, 0x36, 0x01,
@@ -2724,8 +2724,8 @@
 				     0x4c, 0x00, 0xd0, 0x0e, 0x31, 0x1c, 0x87,
 				     0xee, 0x1c, 0xc3, 0x81, 0xb4, 0xb6, 0x09,
 				     0xb5, 0x3c, 0x85, 0xb9, 0xcc, 0x2d, 0x2e,
-				     0x9c, 0x85, 0x0b, 0xf7, 0x97, 0x5a, 0x56,
-				     0xab },
+				     0x9c, 0x85, 0x0b, 0xf7, 0xc1, 0x27, 0x82,
+				     0xc3 },
 		.sn_size = 18,
 		.hfn = 0x1,
 		.hfn_threshold = 0xfa558,
@@ -2752,7 +2752,7 @@
 					 0x4b, 0x32, 0x87, 0xf9, 0xdb, 0xe0,
 					 0x31, 0x5f, 0x3a, 0x15 },
 		.data_in =
-			(uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f,
+			(uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f,
 				     0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88,
 				     0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb,
 				     0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f,
@@ -2764,7 +2764,7 @@
 				     0xf9, 0xdd, 0xcc, 0x69 },
 		.in_len = 67,
 		.data_out =
-			(uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0x78, 0xee, 0x01,
+			(uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0x78, 0xee, 0x01,
 				     0x4c, 0x79, 0xc7, 0x14, 0x79, 0x2c, 0x1b,
 				     0x8a, 0xd0, 0x17, 0xb4, 0xb4, 0xfe, 0xa7,
 				     0x17, 0x9a, 0x93, 0x5d, 0xad, 0x27, 0x8a,
@@ -2773,8 +2773,8 @@
 				     0x37, 0xfb, 0x98, 0x1b, 0x06, 0x1a, 0x76,
 				     0xf8, 0x07, 0x72, 0xe9, 0xa2, 0x89, 0x47,
 				     0x95, 0x8f, 0x8f, 0x4e, 0x45, 0x55, 0xd6,
-				     0x0e, 0xf4, 0xe7, 0xe8, 0x08, 0x68, 0xff,
-				     0x7c },
+				     0x0e, 0xf4, 0xe7, 0xe8, 0x97, 0x76, 0xce,
+				     0xac },
 		.sn_size = 18,
 		.hfn = 0x1,
 		.hfn_threshold = 0xfa558,
@@ -2801,7 +2801,7 @@
 					 0x4b, 0x32, 0x87, 0xf9, 0xdb, 0xe0,
 					 0x31, 0x5f, 0x3a, 0x15 },
 		.data_in =
-			(uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f,
+			(uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f,
 				     0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88,
 				     0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb,
 				     0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f,
@@ -2813,7 +2813,7 @@
 				     0xf9, 0xdd, 0xcc, 0x69 },
 		.in_len = 67,
 		.data_out =
-			(uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xbf, 0x21, 0xc9,
+			(uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xbf, 0x21, 0xc9,
 				     0x7d, 0x02, 0x8d, 0xf7, 0xf7, 0x80, 0x50,
 				     0x60, 0x32, 0x09, 0xb8, 0x69, 0x48, 0xab,
 				     0x58, 0xf0, 0xd9, 0x63, 0x63, 0x36, 0x01,
@@ -2822,8 +2822,8 @@
 				     0x4c, 0x00, 0xd0, 0x0e, 0x31, 0x1c, 0x87,
 				     0xee, 0x1c, 0xc3, 0x81, 0xb4, 0xb6, 0x09,
 				     0xb5, 0x3c, 0x85, 0xb9, 0xcc, 0x2d, 0x2e,
-				     0x9c, 0x85, 0x0b, 0xf7, 0x41, 0xdd, 0x19,
-				     0x32 },
+				     0x9c, 0x85, 0x0b, 0xf7, 0x69, 0x56, 0x6f,
+				     0xaf },
 		.sn_size = 18,
 		.hfn = 0x1,
 		.hfn_threshold = 0xfa558,
@@ -2948,7 +2948,7 @@
 					 0x4b, 0x32, 0x87, 0xf9, 0xdb, 0xe0,
 					 0x31, 0x5f, 0x3a, 0x15 },
 		.data_in =
-			(uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f,
+			(uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f,
 				     0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88,
 				     0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb,
 				     0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f,
@@ -2960,7 +2960,7 @@
 				     0xf9, 0xdd, 0xcc, 0x69 },
 		.in_len = 67,
 		.data_out =
-			(uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0x0c, 0xa8, 0x9d,
+			(uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0x0c, 0xa8, 0x9d,
 				     0x9e, 0xcc, 0xf0, 0x1a, 0xc0, 0xf2, 0x9f,
 				     0x8c, 0xc9, 0x57, 0xc7, 0x99, 0x4d, 0xde,
 				     0xc5, 0x19, 0x69, 0x58, 0x5b, 0x1a, 0x51,
@@ -2969,8 +2969,8 @@
 				     0xb3, 0x24, 0x88, 0x68, 0x5f, 0x78, 0x3a,
 				     0xd2, 0x6c, 0xcc, 0xa0, 0xb5, 0xf0, 0x95,
 				     0x3e, 0xf1, 0xf4, 0x3e, 0x43, 0x8f, 0x6c,
-				     0xae, 0x22, 0x59, 0x11, 0xf6, 0x97, 0x0b,
-				     0x7b },
+				     0xae, 0x22, 0x59, 0x11, 0x4e, 0x02, 0xa0,
+				     0x95 },
 		.sn_size = 18,
 		.hfn = 0x1,
 		.hfn_threshold = 0xfa558,
@@ -2997,7 +2997,7 @@
 					 0x4b, 0x32, 0x87, 0xf9, 0xdb, 0xe0,
 					 0x31, 0x5f, 0x3a, 0x15 },
 		.data_in =
-			(uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f,
+			(uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f,
 				     0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88,
 				     0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb,
 				     0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f,
@@ -3009,7 +3009,7 @@
 				     0xf9, 0xdd, 0xcc, 0x69 },
 		.in_len = 67,
 		.data_out =
-			(uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0x5a, 0xa9, 0xb5,
+			(uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0x5a, 0xa9, 0xb5,
 				     0x61, 0x8b, 0x8a, 0xb7, 0x6a, 0x98, 0x30,
 				     0x6c, 0xed, 0x84, 0x69, 0xff, 0x6b, 0x7e,
 				     0x30, 0x59, 0x55, 0x80, 0x32, 0xd0, 0x0f,
@@ -3018,8 +3018,8 @@
 				     0x0e, 0x2e, 0x33, 0xe6, 0xa4, 0xea, 0x17,
 				     0xd6, 0x56, 0xa2, 0x3b, 0x5f, 0x56, 0x3a,
 				     0xa7, 0x6f, 0x4c, 0xc1, 0xca, 0xbd, 0x5f,
-				     0xad, 0x3d, 0x99, 0x4a, 0xa3, 0xab, 0xd5,
-				     0x7c },
+				     0xad, 0x3d, 0x99, 0x4a, 0x05, 0x03, 0xea,
+				     0xa4 },
 		.sn_size = 18,
 		.hfn = 0x1,
 		.hfn_threshold = 0xfa558,
@@ -3046,7 +3046,7 @@
 					 0x4b, 0x32, 0x87, 0xf9, 0xdb, 0xe0,
 					 0x31, 0x5f, 0x3a, 0x15 },
 		.data_in =
-			(uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f,
+			(uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f,
 				     0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88,
 				     0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb,
 				     0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f,
@@ -3058,7 +3058,7 @@
 				     0xf9, 0xdd, 0xcc, 0x69 },
 		.in_len = 67,
 		.data_out =
-			(uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0x0c, 0xa8, 0x9d,
+			(uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0x0c, 0xa8, 0x9d,
 				     0x9e, 0xcc, 0xf0, 0x1a, 0xc0, 0xf2, 0x9f,
 				     0x8c, 0xc9, 0x57, 0xc7, 0x99, 0x4d, 0xde,
 				     0xc5, 0x19, 0x69, 0x58, 0x5b, 0x1a, 0x51,
@@ -3067,8 +3067,8 @@
 				     0xb3, 0x24, 0x88, 0x68, 0x5f, 0x78, 0x3a,
 				     0xd2, 0x6c, 0xcc, 0xa0, 0xb5, 0xf0, 0x95,
 				     0x3e, 0xf1, 0xf4, 0x3e, 0x43, 0x8f, 0x6c,
-				     0xae, 0x22, 0x59, 0x11, 0x86, 0xec, 0x11,
-				     0xf0 },
+				     0xae, 0x22, 0x59, 0x11, 0x00, 0x3c, 0x80,
+				     0xa7 },
 		.sn_size = 18,
 		.hfn = 0x1,
 		.hfn_threshold = 0xfa558,
@@ -3095,7 +3095,7 @@
 					 0x4b, 0x32, 0x87, 0xf9, 0xdb, 0xe0,
 					 0x31, 0x5f, 0x3a, 0x15 },
 		.data_in =
-			(uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f,
+			(uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f,
 				     0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88,
 				     0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb,
 				     0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f,
@@ -3107,7 +3107,7 @@
 				     0xf9, 0xdd, 0xcc, 0x69 },
 		.in_len = 67,
 		.data_out =
-			(uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0x5a, 0xa9, 0xb5,
+			(uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0x5a, 0xa9, 0xb5,
 				     0x61, 0x8b, 0x8a, 0xb7, 0x6a, 0x98, 0x30,
 				     0x6c, 0xed, 0x84, 0x69, 0xff, 0x6b, 0x7e,
 				     0x30, 0x59, 0x55, 0x80, 0x32, 0xd0, 0x0f,
@@ -3116,8 +3116,8 @@
 				     0x0e, 0x2e, 0x33, 0xe6, 0xa4, 0xea, 0x17,
 				     0xd6, 0x56, 0xa2, 0x3b, 0x5f, 0x56, 0x3a,
 				     0xa7, 0x6f, 0x4c, 0xc1, 0xca, 0xbd, 0x5f,
-				     0xad, 0x3d, 0x99, 0x4a, 0x85, 0x71, 0xb3,
-				     0x72 },
+				     0xad, 0x3d, 0x99, 0x4a, 0xd3, 0x0c, 0x67,
+				     0x1a },
 		.sn_size = 18,
 		.hfn = 0x1,
 		.hfn_threshold = 0xfa558,
@@ -3144,7 +3144,7 @@
 					 0x4b, 0x32, 0x87, 0xf9, 0xdb, 0xe0,
 					 0x31, 0x5f, 0x3a, 0x15 },
 		.data_in =
-			(uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f,
+			(uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f,
 				     0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88,
 				     0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb,
 				     0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f,
@@ -3156,7 +3156,7 @@
 				     0xf9, 0xdd, 0xcc, 0x69 },
 		.in_len = 67,
 		.data_out =
-			(uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0x0c, 0xa8, 0x9d,
+			(uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0x0c, 0xa8, 0x9d,
 				     0x9e, 0xcc, 0xf0, 0x1a, 0xc0, 0xf2, 0x9f,
 				     0x8c, 0xc9, 0x57, 0xc7, 0x99, 0x4d, 0xde,
 				     0xc5, 0x19, 0x69, 0x58, 0x5b, 0x1a, 0x51,
@@ -3165,8 +3165,8 @@
 				     0xb3, 0x24, 0x88, 0x68, 0x5f, 0x78, 0x3a,
 				     0xd2, 0x6c, 0xcc, 0xa0, 0xb5, 0xf0, 0x95,
 				     0x3e, 0xf1, 0xf4, 0x3e, 0x43, 0x8f, 0x6c,
-				     0xae, 0x22, 0x59, 0x11, 0x86, 0x22, 0x35,
-				     0x95 },
+				     0xae, 0x22, 0x59, 0x11, 0x19, 0x3c, 0x04,
+				     0x45 },
 		.sn_size = 18,
 		.hfn = 0x1,
 		.hfn_threshold = 0xfa558,
@@ -3193,7 +3193,7 @@
 					 0x4b, 0x32, 0x87, 0xf9, 0xdb, 0xe0,
 					 0x31, 0x5f, 0x3a, 0x15 },
 		.data_in =
-			(uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f,
+			(uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f,
 				     0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88,
 				     0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb,
 				     0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f,
@@ -3205,7 +3205,7 @@
 				     0xf9, 0xdd, 0xcc, 0x69 },
 		.in_len = 67,
 		.data_out =
-			(uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0x5a, 0xa9, 0xb5,
+			(uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0x5a, 0xa9, 0xb5,
 				     0x61, 0x8b, 0x8a, 0xb7, 0x6a, 0x98, 0x30,
 				     0x6c, 0xed, 0x84, 0x69, 0xff, 0x6b, 0x7e,
 				     0x30, 0x59, 0x55, 0x80, 0x32, 0xd0, 0x0f,
@@ -3214,8 +3214,8 @@
 				     0x0e, 0x2e, 0x33, 0xe6, 0xa4, 0xea, 0x17,
 				     0xd6, 0x56, 0xa2, 0x3b, 0x5f, 0x56, 0x3a,
 				     0xa7, 0x6f, 0x4c, 0xc1, 0xca, 0xbd, 0x5f,
-				     0xad, 0x3d, 0x99, 0x4a, 0x53, 0xf6, 0xfc,
-				     0xeb },
+				     0xad, 0x3d, 0x99, 0x4a, 0x7b, 0x7d, 0x8a,
+				     0x76 },
 		.sn_size = 18,
 		.hfn = 0x1,
 		.hfn_threshold = 0xfa558,
@@ -3340,7 +3340,7 @@
 					 0x4b, 0x32, 0x87, 0xf9, 0xdb, 0xe0,
 					 0x31, 0x5f, 0x3a, 0x15 },
 		.data_in =
-			(uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f,
+			(uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f,
 				     0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88,
 				     0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb,
 				     0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f,
@@ -3352,7 +3352,7 @@
 				     0xf9, 0xdd, 0xcc, 0x69 },
 		.in_len = 67,
 		.data_out =
-			(uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0x9b, 0x9c, 0xde,
+			(uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0x9b, 0x9c, 0xde,
 				     0xa1, 0x69, 0x9b, 0x27, 0xd3, 0x38, 0x93,
 				     0xf2, 0x12, 0xb1, 0xc6, 0x60, 0xac, 0xb7,
 				     0xf2, 0x37, 0xf3, 0x72, 0xaf, 0x50, 0x9d,
@@ -3361,8 +3361,8 @@
 				     0xbb, 0x15, 0xf3, 0x6a, 0x5d, 0x61, 0x12,
 				     0x6e, 0x6d, 0x1b, 0x92, 0xd8, 0x42, 0x60,
 				     0x3e, 0x1f, 0xe0, 0x6c, 0x28, 0x89, 0xf7,
-				     0x0c, 0x61, 0x76, 0xdc, 0x25, 0x8a, 0x31,
-				     0xed },
+				     0x0c, 0x61, 0x76, 0xdc, 0x9d, 0x1f, 0x9a,
+				     0x03 },
 		.sn_size = 18,
 		.hfn = 0x1,
 		.hfn_threshold = 0xfa558,
@@ -3389,7 +3389,7 @@
 					 0x4b, 0x32, 0x87, 0xf9, 0xdb, 0xe0,
 					 0x31, 0x5f, 0x3a, 0x15 },
 		.data_in =
-			(uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f,
+			(uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f,
 				     0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88,
 				     0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb,
 				     0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f,
@@ -3401,7 +3401,7 @@
 				     0xf9, 0xdd, 0xcc, 0x69 },
 		.in_len = 67,
 		.data_out =
-			(uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0x0c, 0xef, 0x82,
+			(uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0x0c, 0xef, 0x82,
 				     0x59, 0x6b, 0x6b, 0x61, 0xbe, 0x54, 0x23,
 				     0x7d, 0x29, 0x6d, 0xa7, 0xd2, 0xfa, 0x26,
 				     0xcc, 0x1d, 0x18, 0x39, 0x99, 0xea, 0xac,
@@ -3410,8 +3410,8 @@
 				     0x7a, 0x8a, 0xe5, 0x00, 0x35, 0x4d, 0x0e,
 				     0x69, 0x9f, 0x4b, 0x72, 0x94, 0x8a, 0x25,
 				     0x69, 0x43, 0x28, 0xdc, 0x40, 0x60, 0x4f,
-				     0x6c, 0xed, 0x6a, 0x50, 0xf3, 0x5e, 0x90,
-				     0x42 },
+				     0x6c, 0xed, 0x6a, 0x50, 0x55, 0xf6, 0xaf,
+				     0x9a },
 		.sn_size = 18,
 		.hfn = 0x1,
 		.hfn_threshold = 0xfa558,
@@ -3438,7 +3438,7 @@
 					 0x4b, 0x32, 0x87, 0xf9, 0xdb, 0xe0,
 					 0x31, 0x5f, 0x3a, 0x15 },
 		.data_in =
-			(uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f,
+			(uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f,
 				     0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88,
 				     0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb,
 				     0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f,
@@ -3450,7 +3450,7 @@
 				     0xf9, 0xdd, 0xcc, 0x69 },
 		.in_len = 67,
 		.data_out =
-			(uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0x9b, 0x9c, 0xde,
+			(uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0x9b, 0x9c, 0xde,
 				     0xa1, 0x69, 0x9b, 0x27, 0xd3, 0x38, 0x93,
 				     0xf2, 0x12, 0xb1, 0xc6, 0x60, 0xac, 0xb7,
 				     0xf2, 0x37, 0xf3, 0x72, 0xaf, 0x50, 0x9d,
@@ -3459,8 +3459,8 @@
 				     0xbb, 0x15, 0xf3, 0x6a, 0x5d, 0x61, 0x12,
 				     0x6e, 0x6d, 0x1b, 0x92, 0xd8, 0x42, 0x60,
 				     0x3e, 0x1f, 0xe0, 0x6c, 0x28, 0x89, 0xf7,
-				     0x0c, 0x61, 0x76, 0xdc, 0x55, 0xf1, 0x2b,
-				     0x66 },
+				     0x0c, 0x61, 0x76, 0xdc, 0xd3, 0x21, 0xba,
+				     0x31 },
 		.sn_size = 18,
 		.hfn = 0x1,
 		.hfn_threshold = 0xfa558,
@@ -3487,7 +3487,7 @@
 					 0x4b, 0x32, 0x87, 0xf9, 0xdb, 0xe0,
 					 0x31, 0x5f, 0x3a, 0x15 },
 		.data_in =
-			(uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f,
+			(uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f,
 				     0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88,
 				     0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb,
 				     0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f,
@@ -3499,7 +3499,7 @@
 				     0xf9, 0xdd, 0xcc, 0x69 },
 		.in_len = 67,
 		.data_out =
-			(uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0x0c, 0xef, 0x82,
+			(uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0x0c, 0xef, 0x82,
 				     0x59, 0x6b, 0x6b, 0x61, 0xbe, 0x54, 0x23,
 				     0x7d, 0x29, 0x6d, 0xa7, 0xd2, 0xfa, 0x26,
 				     0xcc, 0x1d, 0x18, 0x39, 0x99, 0xea, 0xac,
@@ -3508,8 +3508,8 @@
 				     0x7a, 0x8a, 0xe5, 0x00, 0x35, 0x4d, 0x0e,
 				     0x69, 0x9f, 0x4b, 0x72, 0x94, 0x8a, 0x25,
 				     0x69, 0x43, 0x28, 0xdc, 0x40, 0x60, 0x4f,
-				     0x6c, 0xed, 0x6a, 0x50, 0xd5, 0x84, 0xf6,
-				     0x4c },
+				     0x6c, 0xed, 0x6a, 0x50, 0x83, 0xf9, 0x22,
+				     0x24 },
 		.sn_size = 18,
 		.hfn = 0x1,
 		.hfn_threshold = 0xfa558,
@@ -3536,7 +3536,7 @@
 					 0x4b, 0x32, 0x87, 0xf9, 0xdb, 0xe0,
 					 0x31, 0x5f, 0x3a, 0x15 },
 		.data_in =
-			(uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f,
+			(uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f,
 				     0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88,
 				     0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb,
 				     0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f,
@@ -3548,7 +3548,7 @@
 				     0xf9, 0xdd, 0xcc, 0x69 },
 		.in_len = 67,
 		.data_out =
-			(uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0x9b, 0x9c, 0xde,
+			(uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0x9b, 0x9c, 0xde,
 				     0xa1, 0x69, 0x9b, 0x27, 0xd3, 0x38, 0x93,
 				     0xf2, 0x12, 0xb1, 0xc6, 0x60, 0xac, 0xb7,
 				     0xf2, 0x37, 0xf3, 0x72, 0xaf, 0x50, 0x9d,
@@ -3557,8 +3557,8 @@
 				     0xbb, 0x15, 0xf3, 0x6a, 0x5d, 0x61, 0x12,
 				     0x6e, 0x6d, 0x1b, 0x92, 0xd8, 0x42, 0x60,
 				     0x3e, 0x1f, 0xe0, 0x6c, 0x28, 0x89, 0xf7,
-				     0x0c, 0x61, 0x76, 0xdc, 0x55, 0x3f, 0x0f,
-				     0x03 },
+				     0x0c, 0x61, 0x76, 0xdc, 0xca, 0x21, 0x3e,
+				     0xd3 },
 		.sn_size = 18,
 		.hfn = 0x1,
 		.hfn_threshold = 0xfa558,
@@ -3585,7 +3585,7 @@
 					 0x4b, 0x32, 0x87, 0xf9, 0xdb, 0xe0,
 					 0x31, 0x5f, 0x3a, 0x15 },
 		.data_in =
-			(uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f,
+			(uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f,
 				     0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88,
 				     0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb,
 				     0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f,
@@ -3597,7 +3597,7 @@
 				     0xf9, 0xdd, 0xcc, 0x69 },
 		.in_len = 67,
 		.data_out =
-			(uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0x0c, 0xef, 0x82,
+			(uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0x0c, 0xef, 0x82,
 				     0x59, 0x6b, 0x6b, 0x61, 0xbe, 0x54, 0x23,
 				     0x7d, 0x29, 0x6d, 0xa7, 0xd2, 0xfa, 0x26,
 				     0xcc, 0x1d, 0x18, 0x39, 0x99, 0xea, 0xac,
@@ -3606,8 +3606,8 @@
 				     0x7a, 0x8a, 0xe5, 0x00, 0x35, 0x4d, 0x0e,
 				     0x69, 0x9f, 0x4b, 0x72, 0x94, 0x8a, 0x25,
 				     0x69, 0x43, 0x28, 0xdc, 0x40, 0x60, 0x4f,
-				     0x6c, 0xed, 0x6a, 0x50, 0x03, 0x03, 0xb9,
-				     0xd5 },
+				     0x6c, 0xed, 0x6a, 0x50, 0x2b, 0x88, 0xcf,
+				     0x48 },
 		.sn_size = 18,
 		.hfn = 0x1,
 		.hfn_threshold = 0xfa558,
diff -Nru dpdk-20.11.8/app/test/test_event_timer_adapter.c dpdk-20.11.9/app/test/test_event_timer_adapter.c
--- dpdk-20.11.8/app/test/test_event_timer_adapter.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/app/test/test_event_timer_adapter.c	2023-08-15 16:54:57.000000000 +0100
@@ -47,9 +47,10 @@
 static uint64_t global_info_bkt_tck_ns;
 static volatile uint8_t arm_done;
 
-#define CALC_TICKS(tks)					\
-	ceil((double)(tks * global_bkt_tck_ns) / global_info_bkt_tck_ns)
+#define CALC_TICKS(tks) ceil((double)((tks) * global_bkt_tck_ns) / global_info_bkt_tck_ns)
 
+/* Wait double timeout ticks for software and an extra tick for hardware */
+#define WAIT_TICKS(tks) (using_services ? 2 * (tks) : tks + 1)
 
 static bool using_services;
 static uint32_t test_lcore1;
@@ -385,10 +386,31 @@
 	rte_mempool_free(eventdev_test_mempool);
 }
 
+static inline uint16_t
+timeout_event_dequeue(struct rte_event *evs, uint64_t nb_evs, uint64_t ticks)
+{
+	uint16_t ev_cnt = 0;
+	uint64_t end_cycle;
+
+	if (using_services && nb_evs == MAX_TIMERS)
+		ticks = 2 * ticks;
+
+	end_cycle = rte_rdtsc() + ticks * global_bkt_tck_ns * rte_get_tsc_hz() / 1E9;
+
+	while (ev_cnt < nb_evs && rte_rdtsc() < end_cycle) {
+		ev_cnt += rte_event_dequeue_burst(evdev, TEST_PORT_ID, &evs[ev_cnt], nb_evs, 0);
+		rte_pause();
+	}
+
+	return ev_cnt;
+}
+
 static inline int
 test_timer_state(void)
 {
 	struct rte_event_timer *ev_tim;
+	const uint64_t max_ticks = 100;
+	uint64_t ticks, wait_ticks;
 	struct rte_event ev;
 	const struct rte_event_timer tim = {
 		.ev.op = RTE_EVENT_OP_NEW,
@@ -399,11 +421,10 @@
 		.state = RTE_EVENT_TIMER_NOT_ARMED,
 	};
 
-
 	rte_mempool_get(eventdev_test_mempool, (void **)&ev_tim);
 	*ev_tim = tim;
 	ev_tim->ev.event_ptr = ev_tim;
-	ev_tim->timeout_ticks = CALC_TICKS(120);
+	ev_tim->timeout_ticks = CALC_TICKS(max_ticks + 20);
 
 	TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 1), 0,
 			"Armed timer exceeding max_timeout.");
@@ -411,8 +432,9 @@
 			"Improper timer state set expected %d returned %d",
 			RTE_EVENT_TIMER_ERROR_TOOLATE, ev_tim->state);
 
+	ticks = 10;
 	ev_tim->state = RTE_EVENT_TIMER_NOT_ARMED;
-	ev_tim->timeout_ticks = CALC_TICKS(10);
+	ev_tim->timeout_ticks = CALC_TICKS(ticks);
 
 	TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 1), 1,
 			"Failed to arm timer with proper timeout.");
@@ -421,14 +443,15 @@
 			RTE_EVENT_TIMER_ARMED, ev_tim->state);
 
 	if (!using_services)
-		rte_delay_us(20);
+		wait_ticks = 2 * ticks;
 	else
-		rte_delay_us(1000 + 200);
-	TEST_ASSERT_EQUAL(rte_event_dequeue_burst(evdev, 0, &ev, 1, 0), 1,
-			"Armed timer failed to trigger.");
+		wait_ticks = ticks;
+
+	TEST_ASSERT_EQUAL(timeout_event_dequeue(&ev, 1, WAIT_TICKS(wait_ticks)), 1,
+			  "Armed timer failed to trigger.");
 
 	ev_tim->state = RTE_EVENT_TIMER_NOT_ARMED;
-	ev_tim->timeout_ticks = CALC_TICKS(90);
+	ev_tim->timeout_ticks = CALC_TICKS(max_ticks - 10);
 	TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 1), 1,
 			"Failed to arm timer with proper timeout.");
 	TEST_ASSERT_EQUAL(rte_event_timer_cancel_burst(timdev, &ev_tim, 1),
@@ -1061,8 +1084,9 @@
 	int ret, i, n;
 	int num_evtims = MAX_TIMERS;
 	struct rte_event_timer *evtims[num_evtims];
-	struct rte_event evs[BATCH_SIZE];
+	struct rte_event evs[num_evtims];
 	struct rte_event_timer_adapter_stats stats;
+	uint64_t ticks = 5;
 	const struct rte_event_timer init_tim = {
 		.ev.op = RTE_EVENT_OP_NEW,
 		.ev.queue_id = TEST_QUEUE_ID,
@@ -1070,7 +1094,7 @@
 		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 		.ev.event_type =  RTE_EVENT_TYPE_TIMER,
 		.state = RTE_EVENT_TIMER_NOT_ARMED,
-		.timeout_ticks = CALC_TICKS(5), // expire in .5 sec
+		.timeout_ticks = CALC_TICKS(ticks), /**< expire in .5 sec */
 	};
 
 	ret = rte_mempool_get_bulk(eventdev_test_mempool, (void **)evtims,
@@ -1095,31 +1119,12 @@
 			  "succeeded = %d, rte_errno = %s",
 			  num_evtims, ret, rte_strerror(rte_errno));
 
-	rte_delay_ms(1000);
-
-#define MAX_TRIES num_evtims
-	int sum = 0;
-	int tries = 0;
-	bool done = false;
-	while (!done) {
-		sum += rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs,
-					       RTE_DIM(evs), 10);
-		if (sum >= num_evtims || ++tries >= MAX_TRIES)
-			done = true;
-
-		rte_delay_ms(10);
-	}
-
-	TEST_ASSERT_EQUAL(sum, num_evtims, "Expected %d timer expiry events, "
-			  "got %d", num_evtims, sum);
-
-	TEST_ASSERT(tries < MAX_TRIES, "Exceeded max tries");
-
-	rte_delay_ms(100);
+	n = timeout_event_dequeue(evs, RTE_DIM(evs), WAIT_TICKS(ticks));
+	TEST_ASSERT_EQUAL(n, num_evtims, "Expected %d timer expiry events, got %d",
+			  num_evtims, n);
 
 	/* Make sure the eventdev is still empty */
-	n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs),
-				      10);
+	n = timeout_event_dequeue(evs, 1, WAIT_TICKS(1));
 
 	TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected number of timer expiry "
 			  "events from event device");
@@ -1156,6 +1161,7 @@
 	struct rte_event_timer_adapter *adapter = timdev;
 	struct rte_event_timer *evtim = NULL;
 	struct rte_event evs[BATCH_SIZE];
+	uint64_t ticks = 5;
 	const struct rte_event_timer init_tim = {
 		.ev.op = RTE_EVENT_OP_NEW,
 		.ev.queue_id = TEST_QUEUE_ID,
@@ -1163,7 +1169,7 @@
 		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 		.ev.event_type =  RTE_EVENT_TYPE_TIMER,
 		.state = RTE_EVENT_TIMER_NOT_ARMED,
-		.timeout_ticks = CALC_TICKS(5), // expire in .5 sec
+		.timeout_ticks = CALC_TICKS(ticks), /**< expire in .5 sec */
 	};
 
 	rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
@@ -1190,10 +1196,7 @@
 	TEST_ASSERT_EQUAL(rte_errno, EALREADY, "Unexpected rte_errno value "
 			  "after arming already armed timer");
 
-	/* Let timer expire */
-	rte_delay_ms(1000);
-
-	n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
+	n = timeout_event_dequeue(evs, RTE_DIM(evs), WAIT_TICKS(ticks));
 	TEST_ASSERT_EQUAL(n, 1, "Failed to dequeue expected number of expiry "
 			  "events from event device");
 
@@ -1213,6 +1216,7 @@
 	struct rte_event_timer_adapter *adapter = timdev;
 	struct rte_event_timer *evtim = NULL;
 	struct rte_event evs[BATCH_SIZE];
+	uint64_t ticks = 5;
 	const struct rte_event_timer init_tim = {
 		.ev.op = RTE_EVENT_OP_NEW,
 		.ev.queue_id = TEST_QUEUE_ID,
@@ -1220,7 +1224,7 @@
 		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 		.ev.event_type =  RTE_EVENT_TYPE_TIMER,
 		.state = RTE_EVENT_TIMER_NOT_ARMED,
-		.timeout_ticks = CALC_TICKS(5), // expire in .5 sec
+		.timeout_ticks = CALC_TICKS(ticks), /**< expire in .5 sec */
 	};
 
 	rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
@@ -1240,10 +1244,7 @@
 	TEST_ASSERT_EQUAL(rte_errno, EALREADY, "Unexpected rte_errno value "
 			  "after double-arm");
 
-	/* Let timer expire */
-	rte_delay_ms(600);
-
-	n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
+	n = timeout_event_dequeue(evs, RTE_DIM(evs), WAIT_TICKS(ticks));
 	TEST_ASSERT_EQUAL(n, 1, "Dequeued incorrect number of expiry events - "
 			  "expected: 1, actual: %d", n);
 
@@ -1270,6 +1271,7 @@
 		.ev.event_type =  RTE_EVENT_TYPE_TIMER,
 		.state = RTE_EVENT_TIMER_NOT_ARMED,
 	};
+	uint64_t ticks = 30;
 
 	rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
 	if (evtim == NULL) {
@@ -1279,7 +1281,7 @@
 
 	/* Set up an event timer */
 	*evtim = init_tim;
-	evtim->timeout_ticks = CALC_TICKS(30),	// expire in 3 secs
+	evtim->timeout_ticks = CALC_TICKS(ticks); /**< expire in 3 secs */
 	evtim->ev.event_ptr = evtim;
 
 	ret = rte_event_timer_arm_burst(adapter, &evtim, 1);
@@ -1288,17 +1290,10 @@
 	TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ARMED, "Event "
 			  "timer in incorrect state");
 
-	rte_delay_ms(2999);
-
-	n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
+	n = timeout_event_dequeue(evs, RTE_DIM(evs), ticks - 1);
 	TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected timer expiry event");
 
-	/* Delay 100 ms to account for the adapter tick window - should let us
-	 * dequeue one event
-	 */
-	rte_delay_ms(100);
-
-	n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
+	n = timeout_event_dequeue(evs, RTE_DIM(evs), WAIT_TICKS(1));
 	TEST_ASSERT_EQUAL(n, 1, "Dequeued incorrect number (%d) of timer "
 			  "expiry events", n);
 	TEST_ASSERT_EQUAL(evs[0].event_type, RTE_EVENT_TYPE_TIMER,
@@ -1330,6 +1325,7 @@
 		.ev.event_type = RTE_EVENT_TYPE_TIMER,
 		.state = RTE_EVENT_TIMER_NOT_ARMED,
 	};
+	uint64_t ticks = 1;
 
 	rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
 	if (evtim == NULL) {
@@ -1339,7 +1335,7 @@
 
 	/* Set up a timer */
 	*evtim = init_tim;
-	evtim->timeout_ticks = CALC_TICKS(1);  // expire in 0.1 sec
+	evtim->timeout_ticks = CALC_TICKS(ticks); /**< expire in 0.1 sec */
 	evtim->ev.event_ptr = evtim;
 
 	/* Arm it */
@@ -1347,10 +1343,7 @@
 	TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n",
 			  rte_strerror(rte_errno));
 
-	/* Add 100ms to account for the adapter tick window */
-	rte_delay_ms(100 + 100);
-
-	n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
+	n = timeout_event_dequeue(evs, RTE_DIM(evs), WAIT_TICKS(ticks));
 	TEST_ASSERT_EQUAL(n, 1, "Failed to dequeue expected number of expiry "
 			  "events from event device");
 
@@ -1367,10 +1360,7 @@
 	TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n",
 			  rte_strerror(rte_errno));
 
-	/* Add 100ms to account for the adapter tick window */
-	rte_delay_ms(100 + 100);
-
-	n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
+	n = timeout_event_dequeue(evs, RTE_DIM(evs), WAIT_TICKS(ticks));
 	TEST_ASSERT_EQUAL(n, 1, "Failed to dequeue expected number of expiry "
 			  "events from event device");
 
@@ -1392,7 +1382,8 @@
 	int ret, i, n;
 	int num_evtims = MAX_TIMERS;
 	struct rte_event_timer *evtims[num_evtims];
-	struct rte_event evs[BATCH_SIZE];
+	struct rte_event evs[num_evtims];
+	uint64_t ticks = 5;
 	const struct rte_event_timer init_tim = {
 		.ev.op = RTE_EVENT_OP_NEW,
 		.ev.queue_id = TEST_QUEUE_ID,
@@ -1400,7 +1391,7 @@
 		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 		.ev.event_type =  RTE_EVENT_TYPE_TIMER,
 		.state = RTE_EVENT_TIMER_NOT_ARMED,
-		.timeout_ticks = CALC_TICKS(5), // expire in .5 sec
+		.timeout_ticks = CALC_TICKS(ticks), /**< expire in .5 sec */
 	};
 
 	ret = rte_mempool_get_bulk(eventdev_test_mempool, (void **)evtims,
@@ -1420,31 +1411,12 @@
 			  "succeeded = %d, rte_errno = %s",
 			  num_evtims, ret, rte_strerror(rte_errno));
 
-	rte_delay_ms(1000);
-
-#define MAX_TRIES num_evtims
-	int sum = 0;
-	int tries = 0;
-	bool done = false;
-	while (!done) {
-		sum += rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs,
-					       RTE_DIM(evs), 10);
-		if (sum >= num_evtims || ++tries >= MAX_TRIES)
-			done = true;
-
-		rte_delay_ms(10);
-	}
-
-	TEST_ASSERT_EQUAL(sum, num_evtims, "Expected %d timer expiry events, "
-			  "got %d", num_evtims, sum);
-
-	TEST_ASSERT(tries < MAX_TRIES, "Exceeded max tries");
-
-	rte_delay_ms(100);
+	n = timeout_event_dequeue(evs, RTE_DIM(evs), WAIT_TICKS(ticks));
+	TEST_ASSERT_EQUAL(n, num_evtims, "Expected %d timer expiry events, got %d",
+			  num_evtims, n);
 
 	/* Make sure the eventdev is still empty */
-	n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs),
-				    10);
+	n = timeout_event_dequeue(evs, 1, WAIT_TICKS(1));
 
 	TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected number of timer expiry "
 			  "events from event device");
@@ -1564,6 +1536,7 @@
 		.ev.event_type =  RTE_EVENT_TYPE_TIMER,
 		.state = RTE_EVENT_TIMER_NOT_ARMED,
 	};
+	uint64_t ticks = 30;
 
 	rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
 	if (evtim == NULL) {
@@ -1581,7 +1554,7 @@
 	/* Set up a timer */
 	*evtim = init_tim;
 	evtim->ev.event_ptr = evtim;
-	evtim->timeout_ticks = CALC_TICKS(30);  // expire in 3 sec
+	evtim->timeout_ticks = CALC_TICKS(ticks); /**< expire in 3 sec */
 
 	/* Check that cancelling an inited but unarmed timer fails */
 	ret = rte_event_timer_cancel_burst(adapter, &evtim, 1);
@@ -1605,10 +1578,8 @@
 	TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_CANCELED,
 			  "evtim in incorrect state");
 
-	rte_delay_ms(3000);
-
 	/* Make sure that no expiry event was generated */
-	n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
+	n = timeout_event_dequeue(evs, RTE_DIM(evs), WAIT_TICKS(ticks));
 	TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected timer expiry event\n");
 
 	rte_mempool_put(eventdev_test_mempool, evtim);
@@ -1631,8 +1602,8 @@
 		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 		.ev.event_type =  RTE_EVENT_TYPE_TIMER,
 		.state = RTE_EVENT_TIMER_NOT_ARMED,
-		.timeout_ticks = CALC_TICKS(5), // expire in .5 sec
 	};
+	uint64_t ticks = 30;
 
 	rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
 	if (evtim == NULL) {
@@ -1643,7 +1614,7 @@
 	/* Set up a timer */
 	*evtim = init_tim;
 	evtim->ev.event_ptr = evtim;
-	evtim->timeout_ticks = CALC_TICKS(30);  // expire in 3 sec
+	evtim->timeout_ticks = CALC_TICKS(ticks); /**< expire in 3 sec */
 
 	ret = rte_event_timer_arm_burst(adapter, &evtim, 1);
 	TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n",
@@ -1665,10 +1636,8 @@
 	TEST_ASSERT_EQUAL(rte_errno, EALREADY, "Unexpected rte_errno value "
 			  "after double-cancel: rte_errno = %d", rte_errno);
 
-	rte_delay_ms(3000);
-
 	/* Still make sure that no expiry event was generated */
-	n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
+	n = timeout_event_dequeue(evs, RTE_DIM(evs), WAIT_TICKS(ticks));
 	TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected timer expiry event\n");
 
 	rte_mempool_put(eventdev_test_mempool, evtim);
diff -Nru dpdk-20.11.8/app/test/test_link_bonding.c dpdk-20.11.9/app/test/test_link_bonding.c
--- dpdk-20.11.8/app/test/test_link_bonding.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/app/test/test_link_bonding.c	2023-08-15 16:54:57.000000000 +0100
@@ -2,7 +2,7 @@
  * Copyright(c) 2010-2014 Intel Corporation
  */
 
-#include "unistd.h"
+#include <unistd.h>
 #include <string.h>
 #include <stdarg.h>
 #include <stdio.h>
diff -Nru dpdk-20.11.8/app/test/test_malloc.c dpdk-20.11.9/app/test/test_malloc.c
--- dpdk-20.11.8/app/test/test_malloc.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/app/test/test_malloc.c	2023-08-15 16:54:57.000000000 +0100
@@ -301,11 +301,11 @@
 	rte_malloc_get_socket_stats(socket,&post_stats);
 	/* Check statistics reported are correct */
 	/* All post stats should be equal to pre stats after alloc freed */
-	if ((post_stats.heap_totalsz_bytes != pre_stats.heap_totalsz_bytes) &&
-			(post_stats.heap_freesz_bytes!=pre_stats.heap_freesz_bytes) &&
-			(post_stats.heap_allocsz_bytes!=pre_stats.heap_allocsz_bytes)&&
-			(post_stats.alloc_count!=pre_stats.alloc_count)&&
-			(post_stats.free_count!=pre_stats.free_count)) {
+	if ((post_stats.heap_totalsz_bytes != pre_stats.heap_totalsz_bytes) ||
+			(post_stats.heap_freesz_bytes != pre_stats.heap_freesz_bytes) ||
+			(post_stats.heap_allocsz_bytes != pre_stats.heap_allocsz_bytes) ||
+			(post_stats.alloc_count != pre_stats.alloc_count) ||
+			(post_stats.free_count != pre_stats.free_count)) {
 		printf("Malloc statistics are incorrect - freed alloc\n");
 		return -1;
 	}
@@ -362,11 +362,11 @@
 		return -1;
 	}
 
-	if ((post_stats.heap_totalsz_bytes != pre_stats.heap_totalsz_bytes) &&
-			(post_stats.heap_freesz_bytes!=pre_stats.heap_freesz_bytes) &&
-			(post_stats.heap_allocsz_bytes!=pre_stats.heap_allocsz_bytes)&&
-			(post_stats.alloc_count!=pre_stats.alloc_count)&&
-			(post_stats.free_count!=pre_stats.free_count)) {
+	if ((post_stats.heap_totalsz_bytes != pre_stats.heap_totalsz_bytes) ||
+			(post_stats.heap_freesz_bytes != pre_stats.heap_freesz_bytes) ||
+			(post_stats.heap_allocsz_bytes != pre_stats.heap_allocsz_bytes) ||
+			(post_stats.alloc_count != pre_stats.alloc_count) ||
+			(post_stats.free_count != pre_stats.free_count)) {
 		printf("Malloc statistics are incorrect - freed alloc\n");
 		return -1;
 	}
@@ -927,6 +927,7 @@
 	if (mem == NULL)
 		return -1;
 	if (addr_to_socket(mem) != desired_socket) {
+		rte_free(mem);
 		return -1;
 	}
 	rte_free(mem);
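
For context (not part of the attached debdiff): the test_malloc.c hunks above fix two issues - a leaked allocation on an error path, and the statistics comparison, which must fail when ANY heap counter changed (||) rather than only when all of them changed (&&). A minimal sketch of the corrected pattern, assuming a simple alloc/free cycle:

    #include <rte_malloc.h>

    /* An alloc/free cycle must leave every heap statistic unchanged;
     * if any field differs, the test has found a leak or a stats bug. */
    static int
    heap_stats_unchanged(int socket)
    {
        struct rte_malloc_socket_stats pre, post;
        void *p;

        if (rte_malloc_get_socket_stats(socket, &pre) < 0)
            return -1;
        p = rte_malloc(NULL, 1024, 0);
        if (p == NULL)
            return -1;
        rte_free(p);
        if (rte_malloc_get_socket_stats(socket, &post) < 0)
            return -1;

        if (post.heap_totalsz_bytes != pre.heap_totalsz_bytes ||
            post.heap_freesz_bytes != pre.heap_freesz_bytes ||
            post.heap_allocsz_bytes != pre.heap_allocsz_bytes ||
            post.alloc_count != pre.alloc_count ||
            post.free_count != pre.free_count)
            return -1;

        return 0;
    }
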
diff -Nru dpdk-20.11.8/app/test/test_mbuf.c dpdk-20.11.9/app/test/test_mbuf.c
--- dpdk-20.11.8/app/test/test_mbuf.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/app/test/test_mbuf.c	2023-08-15 16:54:57.000000000 +0100
@@ -1173,37 +1173,16 @@
 #endif
 }
 
-#include <unistd.h>
-#include <sys/resource.h>
-#include <sys/time.h>
-#include <sys/wait.h>
-
-/* use fork() to test mbuf errors panic */
-static int
-verify_mbuf_check_panics(struct rte_mbuf *buf)
+/* Verify if mbuf can pass the check */
+static bool
+mbuf_check_pass(struct rte_mbuf *buf)
 {
-	int pid;
-	int status;
-
-	pid = fork();
-
-	if (pid == 0) {
-		struct rlimit rl;
+	const char *reason;
 
-		/* No need to generate a coredump when panicking. */
-		rl.rlim_cur = rl.rlim_max = 0;
-		setrlimit(RLIMIT_CORE, &rl);
-		rte_mbuf_sanity_check(buf, 1); /* should panic */
-		exit(0);  /* return normally if it doesn't panic */
-	} else if (pid < 0) {
-		printf("Fork Failed\n");
-		return -1;
-	}
-	wait(&status);
-	if(status == 0)
-		return -1;
+	if (rte_mbuf_check(buf, 1, &reason) == 0)
+		return true;
 
-	return 0;
+	return false;
 }
 
 static int
@@ -1220,47 +1199,47 @@
 		return -1;
 
 	printf("Checking good mbuf initially\n");
-	if (verify_mbuf_check_panics(buf) != -1)
+	if (!mbuf_check_pass(buf))
 		return -1;
 
 	printf("Now checking for error conditions\n");
 
-	if (verify_mbuf_check_panics(NULL)) {
+	if (mbuf_check_pass(NULL)) {
 		printf("Error with NULL mbuf test\n");
 		return -1;
 	}
 
 	badbuf = *buf;
 	badbuf.pool = NULL;
-	if (verify_mbuf_check_panics(&badbuf)) {
+	if (mbuf_check_pass(&badbuf)) {
 		printf("Error with bad-pool mbuf test\n");
 		return -1;
 	}
 
 	badbuf = *buf;
 	badbuf.buf_iova = 0;
-	if (verify_mbuf_check_panics(&badbuf)) {
+	if (mbuf_check_pass(&badbuf)) {
 		printf("Error with bad-physaddr mbuf test\n");
 		return -1;
 	}
 
 	badbuf = *buf;
 	badbuf.buf_addr = NULL;
-	if (verify_mbuf_check_panics(&badbuf)) {
+	if (mbuf_check_pass(&badbuf)) {
 		printf("Error with bad-addr mbuf test\n");
 		return -1;
 	}
 
 	badbuf = *buf;
 	badbuf.refcnt = 0;
-	if (verify_mbuf_check_panics(&badbuf)) {
+	if (mbuf_check_pass(&badbuf)) {
 		printf("Error with bad-refcnt(0) mbuf test\n");
 		return -1;
 	}
 
 	badbuf = *buf;
 	badbuf.refcnt = UINT16_MAX;
-	if (verify_mbuf_check_panics(&badbuf)) {
+	if (mbuf_check_pass(&badbuf)) {
 		printf("Error with bad-refcnt(MAX) mbuf test\n");
 		return -1;
 	}
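
For context (not part of the attached debdiff): the test_mbuf.c hunks above ("test/mbuf: fix crash in a forked process") drop the fork()-and-expect-panic approach in favour of rte_mbuf_check(), which reports a failure reason instead of aborting the process. A minimal usage sketch:

    #include <stdio.h>
    #include <rte_mbuf.h>

    /* Validate an mbuf without panicking; rte_mbuf_check() fills in
     * 'reason' and returns non-zero when the buffer is malformed. */
    static int
    check_mbuf_verbose(const struct rte_mbuf *m)
    {
        const char *reason = NULL;

        if (rte_mbuf_check(m, 1, &reason) != 0) {
            printf("bad mbuf: %s\n", reason);
            return -1;
        }
        return 0;
    }
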
diff -Nru dpdk-20.11.8/app/test-crypto-perf/main.c dpdk-20.11.9/app/test-crypto-perf/main.c
--- dpdk-20.11.8/app/test-crypto-perf/main.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/app/test-crypto-perf/main.c	2023-08-15 16:54:57.000000000 +0100
@@ -191,11 +191,10 @@
 #endif
 
 		struct rte_cryptodev_info cdev_info;
-		uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);
-		/* range check the socket_id - negative values become big
-		 * positive ones due to use of unsigned value
-		 */
-		if (socket_id >= RTE_MAX_NUMA_NODES)
+		int socket_id = rte_cryptodev_socket_id(cdev_id);
+
+		/* Use the first socket if SOCKET_ID_ANY is returned. */
+		if (socket_id == SOCKET_ID_ANY)
 			socket_id = 0;
 
 		rte_cryptodev_info_get(cdev_id, &cdev_info);
@@ -607,7 +606,11 @@
 
 		cdev_id = enabled_cdevs[cdev_index];
 
-		uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);
+		int socket_id = rte_cryptodev_socket_id(cdev_id);
+
+		/* Use the first socket if SOCKET_ID_ANY is returned. */
+		if (socket_id == SOCKET_ID_ANY)
+			socket_id = 0;
 
 		ctx[i] = cperf_testmap[opts.test].constructor(
 				session_pool_socket[socket_id].sess_mp,
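
For context (not part of the attached debdiff): the crypto-perf hunks above ("app/crypto-perf: fix socket ID default value") handle the fact that rte_cryptodev_socket_id() may return SOCKET_ID_ANY (-1); storing that in a uint8_t turned it into 255, which then hit the old >= RTE_MAX_NUMA_NODES range check. A minimal sketch of the safe pattern:

    #include <rte_cryptodev.h>
    #include <rte_memory.h>     /* SOCKET_ID_ANY */

    /* Keep the socket id signed so SOCKET_ID_ANY (-1) is preserved,
     * and fall back to socket 0 when the device has no NUMA affinity. */
    static int
    cdev_socket_or_zero(uint8_t cdev_id)
    {
        int socket_id = rte_cryptodev_socket_id(cdev_id);

        if (socket_id == SOCKET_ID_ANY)
            socket_id = 0;
        return socket_id;
    }
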
diff -Nru dpdk-20.11.8/app/test-pmd/csumonly.c dpdk-20.11.9/app/test-pmd/csumonly.c
--- dpdk-20.11.8/app/test-pmd/csumonly.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/app/test-pmd/csumonly.c	2023-08-15 16:54:57.000000000 +0100
@@ -246,7 +246,7 @@
 		info->l4_proto = 0;
 	}
 
-	info->l2_len += RTE_ETHER_GTP_HLEN;
+	info->l2_len += gtp_len + sizeof(*udp_hdr);
 }
 
 /* Parse a vxlan header */
diff -Nru dpdk-20.11.8/debian/changelog dpdk-20.11.9/debian/changelog
--- dpdk-20.11.8/debian/changelog	2023-04-28 12:27:52.000000000 +0100
+++ dpdk-20.11.9/debian/changelog	2023-09-21 12:50:42.000000000 +0100
@@ -1,3 +1,11 @@
+dpdk (20.11.9-1~deb11u1) bullseye; urgency=medium
+
+  * New upstream release 20.11.9; for a full list of changes see:
+    http://doc.dpdk.org/guides-20.11/rel_notes/release_20_11.html
+  * Refresh patches to remove fuzz from 20.11.9
+
+ -- Luca Boccassi <bluca@debian.org>  Thu, 21 Sep 2023 12:50:42 +0100
+
 dpdk (20.11.8-1~deb11u1) bullseye; urgency=medium
 
   * New upstream release 20.11.8; for a full list of changes see:
diff -Nru dpdk-20.11.8/debian/patches/disable_autopkgtest_fails.patch dpdk-20.11.9/debian/patches/disable_autopkgtest_fails.patch
--- dpdk-20.11.8/debian/patches/disable_autopkgtest_fails.patch	2023-04-28 12:08:31.000000000 +0100
+++ dpdk-20.11.9/debian/patches/disable_autopkgtest_fails.patch	2023-09-21 12:50:42.000000000 +0100
@@ -8,19 +8,19 @@
 Last-Update: 2021-07-13
 --- a/app/test/meson.build
 +++ b/app/test/meson.build
-@@ -210,7 +210,7 @@ fast_tests = [
-         ['fib6_autotest', true],
+@@ -211,7 +211,7 @@
          ['func_reentrancy_autotest', false],
          ['flow_classify_autotest', false],
+         ['graph_autotest', true],
 -        ['hash_autotest', true],
 +        ['hash_autotest', false],
          ['interrupt_autotest', true],
          ['ipfrag_autotest', false],
          ['logs_autotest', true],
-@@ -225,9 +225,9 @@ fast_tests = [
-         ['memzone_autotest', false],
+@@ -227,9 +227,9 @@
          ['meter_autotest', true],
          ['multiprocess_autotest', false],
+         ['node_list_dump', true],
 -        ['per_lcore_autotest', true],
 +        ['per_lcore_autotest', false],
          ['prefetch_autotest', true],
@@ -29,7 +29,7 @@
          ['red_autotest', true],
          ['rib_autotest', true],
          ['rib6_autotest', true],
-@@ -242,7 +242,7 @@ fast_tests = [
+@@ -243,7 +243,7 @@
          ['stack_autotest', false],
          ['stack_lf_autotest', false],
          ['string_autotest', true],
@@ -38,7 +38,7 @@
          ['tailq_autotest', true],
          ['timer_autotest', false],
          ['user_delay_us', true],
-@@ -262,7 +262,7 @@ fast_tests = [
+@@ -263,7 +263,7 @@
          ['power_autotest', true],
          ['power_kvm_vm_autotest', false],
          ['reorder_autotest', true],
diff -Nru dpdk-20.11.8/doc/api/doxy-api.conf.in dpdk-20.11.9/doc/api/doxy-api.conf.in
--- dpdk-20.11.8/doc/api/doxy-api.conf.in	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/doc/api/doxy-api.conf.in	2023-08-15 16:54:57.000000000 +0100
@@ -97,7 +97,6 @@
 VERBATIM_HEADERS        = NO
 ALPHABETICAL_INDEX      = NO
 
-HTML_TIMESTAMP          = NO
 HTML_DYNAMIC_SECTIONS   = YES
 SEARCHENGINE            = YES
 SORT_MEMBER_DOCS        = NO
diff -Nru dpdk-20.11.8/doc/guides/nics/tap.rst dpdk-20.11.9/doc/guides/nics/tap.rst
--- dpdk-20.11.8/doc/guides/nics/tap.rst	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/doc/guides/nics/tap.rst	2023-08-15 16:54:57.000000000 +0100
@@ -34,14 +34,14 @@
 
 The MAC address will have a fixed value with the last octet incrementing by one
 for each interface string containing ``mac=fixed``. The MAC address is formatted
-as 00:'d':'t':'a':'p':[00-FF]. Convert the characters to hex and you get the
-actual MAC address: ``00:64:74:61:70:[00-FF]``.
+as 02:'d':'t':'a':'p':[00-FF]. Convert the characters to hex and you get the
+actual MAC address: ``02:64:74:61:70:[00-FF]``.
 
-   --vdev=net_tap0,mac="00:64:74:61:70:11"
+   --vdev=net_tap0,mac="02:64:74:61:70:11"
 
 The MAC address will have a user value passed as string. The MAC address is in
 format with delimiter ``:``. The string is byte converted to hex and you get
-the actual MAC address: ``00:64:74:61:70:11``.
+the actual MAC address: ``02:64:74:61:70:11``.
 
 It is possible to specify a remote netdevice to capture packets from by adding
 ``remote=foo1``, for example::
diff -Nru dpdk-20.11.8/doc/guides/platform/octeontx2.rst dpdk-20.11.9/doc/guides/platform/octeontx2.rst
--- dpdk-20.11.8/doc/guides/platform/octeontx2.rst	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/doc/guides/platform/octeontx2.rst	2023-08-15 16:54:57.000000000 +0100
@@ -249,7 +249,7 @@
 
 Enable ``debugfs`` by:
 
-1. Compile kernel with debugfs enabled, i.e ``CONFIG_DEBUGFS=y``.
+1. Compile kernel with debugfs enabled, i.e ``CONFIG_DEBUG_FS=y``.
 2. Boot OCTEON TX2 with debugfs supported kernel.
 3. Verify ``debugfs`` mounted by default "mount | grep -i debugfs" or mount it manually by using.
 
diff -Nru dpdk-20.11.8/doc/guides/prog_guide/event_timer_adapter.rst dpdk-20.11.9/doc/guides/prog_guide/event_timer_adapter.rst
--- dpdk-20.11.8/doc/guides/prog_guide/event_timer_adapter.rst	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/doc/guides/prog_guide/event_timer_adapter.rst	2023-08-15 16:54:57.000000000 +0100
@@ -217,9 +217,7 @@
 RTE_EVENT_TIMER_NOT_ARMED.  Also note that we have saved a pointer to the
 ``conn`` object in the timer's event payload. This will allow us to locate
 the connection object again once we dequeue the timer expiry event from the
-event device later.  As a convenience, the application may specify no value for
-ev.event_ptr, and the adapter will by default set it to point at the event
-timer itself.
+event device later.
 
 Now we can arm the event timer with ``rte_event_timer_arm_burst()``:
 
diff -Nru dpdk-20.11.8/doc/guides/prog_guide/graph_lib.rst dpdk-20.11.9/doc/guides/prog_guide/graph_lib.rst
--- dpdk-20.11.8/doc/guides/prog_guide/graph_lib.rst	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/doc/guides/prog_guide/graph_lib.rst	2023-08-15 16:54:57.000000000 +0100
@@ -173,7 +173,7 @@
 ~~~~~~~~~~~~~~~~~~~~~~~
 Now that the nodes are linked, Its time to create a graph by including
 the required nodes. The application can provide a set of node patterns to
-form a graph object. The ``famish()`` API used underneath for the pattern
+form a graph object. The ``fnmatch()`` API used underneath for the pattern
 matching to include the required nodes. After the graph create any changes to
 nodes or graph is not allowed.
 
diff -Nru dpdk-20.11.8/doc/guides/prog_guide/rte_flow.rst dpdk-20.11.9/doc/guides/prog_guide/rte_flow.rst
--- dpdk-20.11.8/doc/guides/prog_guide/rte_flow.rst	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/doc/guides/prog_guide/rte_flow.rst	2023-08-15 16:54:57.000000000 +0100
@@ -117,14 +117,15 @@
 Flow rules can be grouped by assigning them a common group number. Groups
 allow a logical hierarchy of flow rule groups (tables) to be defined. These
 groups can be supported virtually in the PMD or in the physical device.
-Group 0 is the default group and this is the only group which flows are
-guarantee to matched against, all subsequent groups can only be reached by
-way of the JUMP action from a matched flow rule.
+Group 0 is the default group and is the only group that
+flows are guaranteed to be matched against.
+All subsequent groups can only be reached by using a JUMP action
+from a matched flow rule.
 
 Although optional, applications are encouraged to group similar rules as
 much as possible to fully take advantage of hardware capabilities
 (e.g. optimized matching) and work around limitations (e.g. a single pattern
-type possibly allowed in a given group), while being aware that the groups
+type possibly allowed in a given group), while being aware that the groups'
 hierarchies must be programmed explicitly.
 
 Note that support for more than a single group is not guaranteed.
@@ -139,7 +140,7 @@
 not need to be contiguous nor start from 0, however the maximum number
 varies between devices and may be affected by existing flow rules.
 
-A flow which matches multiple rules in the same group will always matched by
+A flow which matches multiple rules in the same group will always be matched by
 the rule with the highest priority in that group.
 
 If a packet is matched by several rules of a given group for a given
@@ -1610,12 +1611,12 @@
 the specified group on that device.
 
 If a matched flow is redirected to a table which doesn't contain a matching
-rule for that flow then the behavior is undefined and the resulting behavior
-is up to the specific device. Best practice when using groups would be define
+rule for that flow, then the behavior is undefined and the resulting behavior
+is up to the specific device. Best practice when using groups would be to define
 a default flow rule for each group which a defines the default actions in that
 group so a consistent behavior is defined.
 
-Defining an action for matched flow in a group to jump to a group which is
+Defining an action for a matched flow in a group to jump to a group which is
 higher in the group hierarchy may not be supported by physical devices,
 depending on how groups are mapped to the physical devices. In the
 definitions of jump actions, applications should be aware that it may be
@@ -1800,8 +1801,8 @@
   level.
 
 - ``2`` and subsequent values request RSS to be performed on the specified
-   inner packet encapsulation level, from outermost to innermost (lower to
-   higher values).
+  inner packet encapsulation level, from outermost to innermost (lower to
+  higher values).
 
 Values other than ``0`` are not necessarily supported.
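
For context (not part of the attached debdiff): the rte_flow.rst wording fixes above describe group 0 as the only group flows are guaranteed to be matched against, with later groups reached via the JUMP action. A rough sketch of a group-0 rule that jumps to group 1 (port id and pattern are placeholders; JUMP and multi-group support depend on the PMD):

    #include <rte_flow.h>

    static struct rte_flow *
    install_jump_to_group1(uint16_t port_id, struct rte_flow_error *err)
    {
        /* Rule lives in the default group 0, ingress direction. */
        struct rte_flow_attr attr = { .group = 0, .ingress = 1 };
        struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH },
            { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        /* Redirect matched traffic to group 1 for further matching. */
        struct rte_flow_action_jump jump = { .group = 1 };
        struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
            { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(port_id, &attr, pattern, actions, err);
    }
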
 
diff -Nru dpdk-20.11.8/doc/guides/rawdevs/ntb.rst dpdk-20.11.9/doc/guides/rawdevs/ntb.rst
--- dpdk-20.11.8/doc/guides/rawdevs/ntb.rst	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/doc/guides/rawdevs/ntb.rst	2023-08-15 16:54:57.000000000 +0100
@@ -1,6 +1,8 @@
 ..  SPDX-License-Identifier: BSD-3-Clause
     Copyright(c) 2018 Intel Corporation.
 
+.. include:: <isonum.txt>
+
 NTB Rawdev Driver
 =================
 
@@ -17,19 +19,23 @@
 BIOS setting on Intel Xeon
 --------------------------
 
-Intel Non-transparent Bridge needs special BIOS setting. The reference for
-Skylake is https://www.intel.com/content/dam/support/us/en/documents/server-products/Intel_Xeon_Processor_Scalable_Family_BIOS_User_Guide.pdf
-
-- Set the needed PCIe port as NTB to NTB mode on both hosts.
-- Enable NTB bars and set bar size of bar 23 and bar 45 as 12-29 (4K-512M)
-  on both hosts (for Ice Lake, bar size can be set as 12-51, namely 4K-128PB).
-  Note that bar size on both hosts should be the same.
-- Disable split bars for both hosts.
-- Set crosslink control override as DSD/USP on one host, USD/DSP on
-  another host.
-- Disable PCIe PII SSC (Spread Spectrum Clocking) for both hosts. This
-  is a hardware requirement.
-
+Intel Non-transparent Bridge (NTB) needs special BIOS settings on both systems.
+Note that for 4th Generation Intel\ |reg| Xeon\ |reg| Scalable Processors,
+option ``Port Subsystem Mode`` should be changed from ``Gen5`` to ``Gen4 Only``,
+then reboot.
+
+- Set ``Non-Transparent Bridge PCIe Port Definition`` for needed PCIe ports
+  as ``NTB to NTB`` mode, on both hosts.
+- Set ``Enable NTB BARs`` as ``Enabled``, on both hosts.
+- Set ``Enable SPLIT BARs`` as ``Disabled``, on both hosts.
+- Set ``Imbar1 Size``, ``Imbar2 Size``, ``Embar1 Size`` and ``Embar2 Size``,
+  as 12-29 (i.e., 4K-512M) for 2nd Generation Intel\ |reg| Xeon\ |reg| Scalable Processors;
+  as 12-51 (i.e., 4K-128PB) for 3rd and 4th Generation Intel\ |reg| Xeon\ |reg| Scalable Processors.
+  Note that those bar sizes on both hosts should be the same.
+- Set ``Crosslink Control override`` as ``DSD/USP`` on one host,
+  ``USD/DSP`` on another host.
+- Set ``PCIe PLL SSC (Spread Spectrum Clocking)`` as ``Disabled``, on both hosts.
+  This is a hardware requirement when using Re-timer Cards.
 
 Device Setup
 ------------
@@ -145,4 +151,8 @@
 Limitation
 ----------
 
-- This PMD only supports Intel Skylake and Ice Lake platforms.
+This PMD is only supported on Intel Xeon Platforms:
+
+- 4th Generation Intel® Xeon® Scalable Processors.
+- 3rd Generation Intel® Xeon® Scalable Processors.
+- 2nd Generation Intel® Xeon® Scalable Processors.
diff -Nru dpdk-20.11.8/doc/guides/rel_notes/release_20_11.rst dpdk-20.11.9/doc/guides/rel_notes/release_20_11.rst
--- dpdk-20.11.8/doc/guides/rel_notes/release_20_11.rst	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/doc/guides/rel_notes/release_20_11.rst	2023-08-15 16:54:57.000000000 +0100
@@ -4155,3 +4155,232 @@
 
 * testpmd
    * a "OP_DEL_RSS_INPUT_CFG" error can sometimes appear when exiting
+
+20.11.9 Release Notes
+---------------------
+
+
+20.11.9 Fixes
+~~~~~~~~~~~~~
+
+* app/crypto-perf: fix socket ID default value
+* app/testpmd: fix checksum engine with GTP on 32-bit
+* app/testpmd: fix GTP L2 length in checksum engine
+* baseband/fpga_5gnr_fec: fix possible division by zero
+* baseband/fpga_5gnr_fec: fix starting unconfigured queue
+* build: fix case of project language name
+* common/iavf: fix MAC type for 710 NIC
+* common/mlx5: adjust fork call with new kernel API
+* common/sfc_efx/base: fix Rx queue without RSS hash prefix
+* crypto/openssl: skip workaround at compilation time
+* crypto/scheduler: fix last element for valid args
+* doc: fix auth algos in cryptoperf app
+* doc: fix event timer adapter guide
+* doc: fix format in flow API guide
+* doc: fix typo in cnxk platform guide
+* doc: fix typo in graph guide
+* doc: fix typos and wording in flow API guide
+* doc: remove warning with Doxygen 1.9.7
+* doc: update BIOS settings and supported HW for NTB
+* eal: avoid calling cleanup twice
+* eal/linux: fix legacy mem init with many segments
+* eal/linux: fix secondary process crash for mp hotplug requests
+* eal/x86: improve multiple of 64 bytes memcpy performance
+* ethdev: check that at least one FEC mode is specified
+* ethdev: fix MAC address occupies two entries
+* ethdev: fix potential leak in PCI probing helper
+* ethdev: update documentation for API to get FEC
+* ethdev: update documentation for API to set FEC
+* eventdev/timer: fix buffer flush
+* eventdev/timer: fix timeout event wait behavior
+* event/dsw: free rings on close
+* examples/fips_validation: fix digest length in AES-GCM
+* examples/ip_pipeline: fix build with GCC 13
+* examples/ipsec-secgw: fix TAP default MAC address
+* examples/l2fwd-cat: fix external build
+* examples/ntb: fix build with GCC 13
+* fib: fix adding default route
+* ipc: fix file descriptor leakage with unhandled messages
+* kernel/freebsd: fix function parameter list
+* kni: fix build with Linux 6.3
+* kni: fix build with Linux 6.5
+* mbuf: fix Doxygen comment of distributor metadata
+* mem: fix memsegs exhausted message
+* net/bonding: fix destroy dedicated queues flow
+* net/bonding: fix startup when NUMA is not supported
+* net/dpaa2: fix checksum good flags
+* net/e1000: fix queue number initialization
+* net/e1000: fix Rx and Tx queue status
+* net/hns3: fix build warning
+* net/hns3: fix device start return value
+* net/hns3: fix FEC mode check
+* net/hns3: fix FEC mode for 200G ports
+* net/hns3: fix IMP reset trigger
+* net/hns3: fix inaccurate log
+* net/hns3: fix index to look up table in NEON Rx
+* net/hns3: fix mbuf leakage when RxQ started after reset
+* net/hns3: fix mbuf leakage when RxQ started during reset
+* net/hns3: fix non-zero weight for disabled TC
+* net/hns3: fix redundant line break in log
+* net/hns3: fix Rx multiple firmware reset interrupts
+* net/hns3: fix uninitialized variable
+* net/hns3: fix variable type mismatch
+* net/i40e: fix comments
+* net/i40e: fix Rx data buffer size
+* net/i40e: fix tunnel packet Tx descriptor
+* net/iavf: fix abnormal disable HW interrupt
+* net/iavf: fix Rx data buffer size
+* net/iavf: fix stop ordering
+* net/iavf: fix virtchnl command called in interrupt
+* net/iavf: release large VF when closing device
+* net/ice/base: remove unreachable code
+* net/ice: fix 32-bit build
+* net/ice: fix DCF control thread crash
+* net/ice: fix DCF RSS initialization
+* net/ice: fix outer UDP checksum offload
+* net/ice: fix RSS hash key generation
+* net/ice: fix Rx data buffer size
+* net/ice: fix statistics
+* net/ice: fix tunnel packet Tx descriptor
+* net/igc: fix Rx and Tx queue status
+* net/ixgbe: add proper memory barriers in Rx
+* net/ixgbe: fix Rx and Tx queue status
+* net/mlx5: enhance error log for tunnel offloading
+* net/mlx5: fix device removal event handling
+* net/mlx5: fix duplicated tag index matching in SWS
+* net/mlx5: fix LRO TCP checksum
+* net/mlx5: fix MPRQ stride size to accommodate the headroom
+* net/mlx5: fix risk in NEON Rx descriptor read
+* net/mlx5: forbid MPRQ restart
+* net/netvsc: fix sizeof calculation
+* net/nfp: fix address always related with PF ID 0
+* net/nfp: fix offloading flows
+* net/nfp: fix Tx descriptor free logic of NFD3
+* net/qede: fix RSS indirection table initialization
+* net/tap: set locally administered bit for fixed MAC address
+* net/txgbe/base: fix Tx with fiber hotplug
+* net/txgbe: fix extended statistics
+* net/txgbe: fix to set autoneg for 1G speed
+* net/txgbe: fix use-after-free on remove
+* net/virtio: fix initialization to return negative errno
+* net/virtio-user: fix leak when initialisation fails
+* net/vmxnet3: fix drop of empty segments in Tx
+* net/vmxnet3: fix return code in initializing
+* pci: fix comment referencing renamed function
+* ring: fix use after free
+* telemetry: fix autotest on Alpine
+* test: add graph tests
+* test/bonding: fix include of standard header
+* test/crypto: fix PDCP-SDAP test vectors
+* test/crypto: fix return value for SNOW3G
+* test/crypto: fix session creation check
+* test/malloc: fix missing free
+* test/malloc: fix statistics checks
+* test/mbuf: fix crash in a forked process
+* version: 20.11.9-rc1
+* vfio: fix include with musl runtime
+* vhost: fix invalid call FD handling
+
+20.11.9 Validation
+~~~~~~~~~~~~~~~~~~
+
+* Intel(R) Testing
+
+   * Basic Intel(R) NIC testing
+      * Build & CFLAG compile: cover the build test combination with latest GCC/Clang version and the popular OS revision such as Ubuntu20.04, Fedora38, RHEL9.2, etc.
+      * PF(i40e, ixgbe): test scenarios including RTE_FLOW/TSO/Jumboframe/checksum offload/VLAN/VXLAN, etc.
+      * VF(i40e, ixgbe): test scenarios including VF-RTE_FLOW/TSO/Jumboframe/checksum offload/VLAN/VXLAN, etc.
+      * PF/VF(ice): test scenarios including Switch features/Package Management/Flow Director/Advanced Tx/Advanced RSS/ACL/DCF/Flexible Descriptor, etc.
+      * Intel NIC single core/NIC performance: test scenarios including PF/VF single core performance test, etc.
+      * IPsec: test scenarios including ipsec/ipsec-gw/ipsec library basic test - QAT&SW/FIB library, etc.
+
+   * Basic cryptodev and virtio testing
+      * Virtio: both function and performance tests are covered, such as PVP/Virtio_loopback/virtio-user loopback/virtio-net VM2VM perf testing/VMware ESXi 8.0, etc.
+      * Cryptodev:
+         * Function test: test scenarios including Cryptodev API testing/CompressDev ISA-L/QAT/ZLIB PMD Testing/FIPS, etc.
+         * Performance test: test scenarios including Throughput Performance/Cryptodev Latency, etc.
+
+
+* Red Hat(R) Testing
+
+   * Platform
+
+      * RHEL 9
+      * Kernel 5.14
+      * Qemu 6.2.0
+      * X540-AT2 NIC(ixgbe, 10G)
+
+   * Functionality
+
+      * Guest with device assignment(PF) throughput testing(1G hugepage size)
+      * Guest with device assignment(PF) throughput testing(2M hugepage size)
+      * Guest with device assignment(VF) throughput testing
+      * PVP (host dpdk testpmd as vswitch) 1Q: throughput testing
+      * PVP vhost-user 2Q throughput testing
+      * PVP vhost-user 1Q cross numa node throughput testing
+      * Guest with vhost-user 2 queues throughput testing
+      * vhost-user reconnect with dpdk-client, qemu-server: qemu reconnect
+      * vhost-user reconnect with dpdk-client, qemu-server: ovs reconnect
+      * PVP reconnect with dpdk-client, qemu-server: PASS
+      * PVP 1Q live migration testing
+      * PVP 1Q cross numa node live migration testing
+      * Guest with ovs+dpdk+vhost-user 1Q live migration testing
+      * Guest with ovs+dpdk+vhost-user 1Q live migration testing (2M)
+      * Guest with ovs+dpdk+vhost-user 2Q live migration testing
+      * Guest with ovs+dpdk+vhost-user 4Q live migration testing
+      * Host PF + DPDK testing
+      * Host VF + DPDK testing
+
+
+* Nvidia(R) Testing
+
+   * Basic functionality via testpmd/example applications
+
+      * Tx/Rx
+      * xstats
+      * Timestamps
+      * Link status
+      * RTE flow and flow_director
+      * RSS
+      * VLAN filtering, stripping and insertion
+      * Checksum/TSO
+      * ptype
+      * link_status_interrupt example application
+      * l3fwd-power example application
+      * Multi-process example applications
+      * Hardware LRO tests
+
+   * Build tests
+
+      * Ubuntu 20.04.6 with MLNX_OFED_LINUX-23.04-1.1.3.
+      * Ubuntu 20.04.6 with rdma-core master (aba30bd).
+      * Ubuntu 20.04.6 with rdma-core v28.0.
+      * Ubuntu 18.04.6 with rdma-core master (aba30bd) (i386).
+      * Fedora 38 with rdma-core v44.0.
+      * Fedora 39 (Rawhide) with rdma-core v46.0.
+      * OpenSUSE Leap 15.5 with rdma-core v42.0.
+      * Windows Server 2019 with Clang 11.0.0.
+
+   * BlueField-2
+
+      * DOCA 2.0.2
+      * fw 24.37.1300
+
+   * ConnectX-7
+
+      * Ubuntu 20.04
+      * Driver MLNX_OFED_LINUX-23.04-1.1.3.0
+      * fw 28.37.1014
+
+   * ConnectX-6 Dx
+
+      * Ubuntu 20.04
+      * Driver MLNX_OFED_LINUX-23.04-1.1.3.0
+      * fw 22.37.1014
+
+20.11.9 Known Issues
+~~~~~~~~~~~~~~~~~~~~
+
+* testpmd
+   * With the MLX PMD, Multi-Packet Rx Queue (MPRQ) parameters are incorrectly
+     adjusted and not applied properly. See: https://bugs.dpdk.org/show_bug.cgi?id=1274
diff -Nru dpdk-20.11.8/doc/guides/tools/cryptoperf.rst dpdk-20.11.9/doc/guides/tools/cryptoperf.rst
--- dpdk-20.11.8/doc/guides/tools/cryptoperf.rst	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/doc/guides/tools/cryptoperf.rst	2023-08-15 16:54:57.000000000 +0100
@@ -228,7 +228,6 @@
         Set authentication algorithm name, where ``name`` is one
         of the following::
 
-           3des-cbc
            aes-cbc-mac
            aes-cmac
            aes-gmac
diff -Nru dpdk-20.11.8/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c dpdk-20.11.9/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c
--- dpdk-20.11.8/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c	2023-08-15 16:54:57.000000000 +0100
@@ -563,17 +563,21 @@
 fpga_queue_start(struct rte_bbdev *dev, uint16_t queue_id)
 {
 	struct fpga_5gnr_fec_device *d = dev->data->dev_private;
+	struct fpga_queue *q = dev->data->queues[queue_id].queue_private;
+	uint32_t offset = FPGA_5GNR_FEC_RING_CTRL_REGS +
+			(sizeof(struct fpga_ring_ctrl_reg) * q->q_idx);
+	uint8_t enable = 0x01;
+	uint16_t zero = 0x0000;
 #ifdef RTE_LIBRTE_BBDEV_DEBUG
 	if (d == NULL) {
 		rte_bbdev_log(ERR, "Invalid device pointer");
 		return -1;
 	}
 #endif
-	struct fpga_queue *q = dev->data->queues[queue_id].queue_private;
-	uint32_t offset = FPGA_5GNR_FEC_RING_CTRL_REGS +
-			(sizeof(struct fpga_ring_ctrl_reg) * q->q_idx);
-	uint8_t enable = 0x01;
-	uint16_t zero = 0x0000;
+	if (dev->data->queues[queue_id].queue_private == NULL) {
+		rte_bbdev_log(ERR, "Cannot start invalid queue %d", queue_id);
+		return -1;
+	}
 
 	/* Clear queue head and tail variables */
 	q->tail = q->head_free_desc = 0;
@@ -878,9 +882,11 @@
 static inline uint16_t
 get_k0(uint16_t n_cb, uint16_t z_c, uint8_t bg, uint8_t rv_index)
 {
+	uint16_t n = (bg == 1 ? N_ZC_1 : N_ZC_2) * z_c;
 	if (rv_index == 0)
 		return 0;
-	uint16_t n = (bg == 1 ? N_ZC_1 : N_ZC_2) * z_c;
+	if (z_c == 0)
+		return 0;
 	if (n_cb == n) {
 		if (rv_index == 1)
 			return (bg == 1 ? K0_1_1 : K0_1_2) * z_c;
diff -Nru dpdk-20.11.8/drivers/bus/pci/linux/pci_vfio.c dpdk-20.11.9/drivers/bus/pci/linux/pci_vfio.c
--- dpdk-20.11.8/drivers/bus/pci/linux/pci_vfio.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/bus/pci/linux/pci_vfio.c	2023-08-15 16:54:57.000000000 +0100
@@ -2,6 +2,7 @@
  * Copyright(c) 2010-2014 Intel Corporation
  */
 
+#include <unistd.h>
 #include <string.h>
 #include <fcntl.h>
 #include <linux/pci_regs.h>
diff -Nru dpdk-20.11.8/drivers/common/iavf/iavf_common.c dpdk-20.11.9/drivers/common/iavf/iavf_common.c
--- dpdk-20.11.8/drivers/common/iavf/iavf_common.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/common/iavf/iavf_common.c	2023-08-15 16:54:57.000000000 +0100
@@ -27,6 +27,8 @@
 			break;
 		case IAVF_DEV_ID_VF:
 		case IAVF_DEV_ID_VF_HV:
+			hw->mac.type = IAVF_MAC_XL710;
+			break;
 		case IAVF_DEV_ID_ADAPTIVE_VF:
 			hw->mac.type = IAVF_MAC_VF;
 			break;
diff -Nru dpdk-20.11.8/drivers/common/mlx5/linux/meson.build dpdk-20.11.9/drivers/common/mlx5/linux/meson.build
--- dpdk-20.11.8/drivers/common/mlx5/linux/meson.build	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/common/mlx5/linux/meson.build	2023-08-15 16:54:57.000000000 +0100
@@ -190,6 +190,8 @@
         'mlx5dv_dr_action_create_aso' ],
         [ 'HAVE_MLX5_IBV_REG_MR_IOVA', 'infiniband/verbs.h',
             'ibv_reg_mr_iova' ],
+        [ 'HAVE_IBV_FORK_UNNEEDED', 'infiniband/verbs.h',
+            'ibv_is_fork_initialized'],
 ]
 config = configuration_data()
 foreach arg:has_sym_args
diff -Nru dpdk-20.11.8/drivers/common/mlx5/linux/mlx5_glue.c dpdk-20.11.9/drivers/common/mlx5/linux/mlx5_glue.c
--- dpdk-20.11.8/drivers/common/mlx5/linux/mlx5_glue.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/common/mlx5/linux/mlx5_glue.c	2023-08-15 16:54:57.000000000 +0100
@@ -19,6 +19,10 @@
 static int
 mlx5_glue_fork_init(void)
 {
+#ifdef HAVE_IBV_FORK_UNNEEDED
+	if (ibv_is_fork_initialized() == IBV_FORK_UNNEEDED)
+		return 0; /* ibv_fork_init() not needed */
+#endif
 	return ibv_fork_init();
 }
 
diff -Nru dpdk-20.11.8/drivers/common/sfc_efx/base/efx_rx.c dpdk-20.11.9/drivers/common/sfc_efx/base/efx_rx.c
--- dpdk-20.11.8/drivers/common/sfc_efx/base/efx_rx.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/common/sfc_efx/base/efx_rx.c	2023-08-15 16:54:57.000000000 +0100
@@ -896,8 +896,10 @@
 
 		rss_hash_field =
 		    &erplp->erpl_fields[EFX_RX_PREFIX_FIELD_RSS_HASH];
-		if (rss_hash_field->erpfi_width_bits == 0)
+		if (rss_hash_field->erpfi_width_bits == 0) {
+			rc = ENOTSUP;
 			goto fail5;
+		}
 	}
 
 	enp->en_rx_qcount++;
diff -Nru dpdk-20.11.8/drivers/crypto/openssl/rte_openssl_pmd.c dpdk-20.11.9/drivers/crypto/openssl/rte_openssl_pmd.c
--- dpdk-20.11.8/drivers/crypto/openssl/rte_openssl_pmd.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/crypto/openssl/rte_openssl_pmd.c	2023-08-15 16:54:57.000000000 +0100
@@ -1061,8 +1061,11 @@
 		int srclen, uint8_t *aad, int aadlen, uint8_t *iv,
 		uint8_t *dst, uint8_t *tag, EVP_CIPHER_CTX *ctx)
 {
-	int len = 0, unused = 0;
+	int len = 0;
+#if OPENSSL_VERSION_NUMBER < 0x10100000L
+	int unused = 0;
 	uint8_t empty[] = {};
+#endif
 
 	if (EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, iv) <= 0)
 		goto process_auth_encryption_gcm_err;
@@ -1076,9 +1079,11 @@
 				srclen, ctx, 0))
 			goto process_auth_encryption_gcm_err;
 
+#if OPENSSL_VERSION_NUMBER < 0x10100000L
 	/* Workaround open ssl bug in version less then 1.0.1f */
 	if (EVP_EncryptUpdate(ctx, empty, &unused, empty, 0) <= 0)
 		goto process_auth_encryption_gcm_err;
+#endif
 
 	if (EVP_EncryptFinal_ex(ctx, dst, &len) <= 0)
 		goto process_auth_encryption_gcm_err;
@@ -1140,8 +1145,11 @@
 		int srclen, uint8_t *aad, int aadlen, uint8_t *iv,
 		uint8_t *dst, uint8_t *tag, EVP_CIPHER_CTX *ctx)
 {
-	int len = 0, unused = 0;
+	int len = 0;
+#if OPENSSL_VERSION_NUMBER < 0x10100000L
+	int unused = 0;
 	uint8_t empty[] = {};
+#endif
 
 	if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_SET_TAG, 16, tag) <= 0)
 		goto process_auth_decryption_gcm_err;
@@ -1158,9 +1166,11 @@
 				srclen, ctx, 0))
 			goto process_auth_decryption_gcm_err;
 
+#if OPENSSL_VERSION_NUMBER < 0x10100000L
 	/* Workaround open ssl bug in version less then 1.0.1f */
 	if (EVP_DecryptUpdate(ctx, empty, &unused, empty, 0) <= 0)
 		goto process_auth_decryption_gcm_err;
+#endif
 
 	if (EVP_DecryptFinal_ex(ctx, dst, &len) <= 0)
 		return -EFAULT;
diff -Nru dpdk-20.11.8/drivers/crypto/scheduler/scheduler_pmd.c dpdk-20.11.9/drivers/crypto/scheduler/scheduler_pmd.c
--- dpdk-20.11.8/drivers/crypto/scheduler/scheduler_pmd.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/crypto/scheduler/scheduler_pmd.c	2023-08-15 16:54:57.000000000 +0100
@@ -47,7 +47,8 @@
 	RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG,
 	RTE_CRYPTODEV_VDEV_SOCKET_ID,
 	RTE_CRYPTODEV_VDEV_COREMASK,
-	RTE_CRYPTODEV_VDEV_CORELIST
+	RTE_CRYPTODEV_VDEV_CORELIST,
+	NULL
 };
 
 struct scheduler_parse_map {
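
For context (not part of the attached debdiff): the scheduler PMD hunk above ("crypto/scheduler: fix last element for valid args") adds the NULL sentinel that rte_kvargs_parse() expects at the end of a valid-keys array; without it the parser reads past the end of the list. A minimal sketch with placeholder key names:

    #include <rte_kvargs.h>

    /* rte_kvargs_parse() walks valid_keys until it finds NULL, so the
     * terminator is mandatory. Key names here are only illustrative. */
    static const char * const example_valid_args[] = {
        "socket_id",
        "example_arg",
        NULL,
    };

    static struct rte_kvargs *
    parse_devargs(const char *args)
    {
        return rte_kvargs_parse(args, example_valid_args);
    }
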
diff -Nru dpdk-20.11.8/drivers/event/dsw/dsw_evdev.c dpdk-20.11.9/drivers/event/dsw/dsw_evdev.c
--- dpdk-20.11.8/drivers/event/dsw/dsw_evdev.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/event/dsw/dsw_evdev.c	2023-08-15 16:54:57.000000000 +0100
@@ -366,6 +366,10 @@
 dsw_close(struct rte_eventdev *dev)
 {
 	struct dsw_evdev *dsw = dsw_pmd_priv(dev);
+	uint16_t port_id;
+
+	for (port_id = 0; port_id < dsw->num_ports; port_id++)
+		dsw_port_release(&dsw->ports[port_id]);
 
 	dsw->num_ports = 0;
 	dsw->num_queues = 0;
diff -Nru dpdk-20.11.8/drivers/net/bonding/rte_eth_bond_api.c dpdk-20.11.9/drivers/net/bonding/rte_eth_bond_api.c
--- dpdk-20.11.8/drivers/net/bonding/rte_eth_bond_api.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/bonding/rte_eth_bond_api.c	2023-08-15 16:54:57.000000000 +0100
@@ -703,6 +703,16 @@
 		}
 	}
 
+	/* Remove the dedicated queues flow */
+	if (internals->mode == BONDING_MODE_8023AD &&
+		internals->mode4.dedicated_queues.enabled == 1 &&
+		internals->mode4.dedicated_queues.flow[slave_port_id] != NULL) {
+		rte_flow_destroy(slave_port_id,
+				internals->mode4.dedicated_queues.flow[slave_port_id],
+				&flow_error);
+		internals->mode4.dedicated_queues.flow[slave_port_id] = NULL;
+	}
+
 	slave_eth_dev = &rte_eth_devices[slave_port_id];
 	slave_remove(internals, slave_eth_dev);
 	slave_eth_dev->data->dev_flags &= (~RTE_ETH_DEV_BONDED_SLAVE);
diff -Nru dpdk-20.11.8/drivers/net/bonding/rte_eth_bond_args.c dpdk-20.11.9/drivers/net/bonding/rte_eth_bond_args.c
--- dpdk-20.11.8/drivers/net/bonding/rte_eth_bond_args.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/bonding/rte_eth_bond_args.c	2023-08-15 16:54:57.000000000 +0100
@@ -211,6 +211,12 @@
 	if (*endptr != 0 || errno != 0)
 		return -1;
 
+	/* SOCKET_ID_ANY is also considered a valid socket id */
+	if ((int8_t)socket_id == SOCKET_ID_ANY) {
+		*(int *)extra_args = SOCKET_ID_ANY;
+		return 0;
+	}
+
 	/* validate socket id value */
 	if (socket_id >= 0 && socket_id < RTE_MAX_NUMA_NODES) {
 		*(int *)extra_args = (int)socket_id;
diff -Nru dpdk-20.11.8/drivers/net/bonding/rte_eth_bond_pmd.c dpdk-20.11.9/drivers/net/bonding/rte_eth_bond_pmd.c
--- dpdk-20.11.8/drivers/net/bonding/rte_eth_bond_pmd.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/bonding/rte_eth_bond_pmd.c	2023-08-15 16:54:57.000000000 +0100
@@ -3306,7 +3306,7 @@
 bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
 {
 	const char *name = rte_vdev_device_name(dev);
-	uint8_t socket_id = dev->device.numa_node;
+	int socket_id = dev->device.numa_node;
 	struct bond_dev_private *internals = NULL;
 	struct rte_eth_dev *eth_dev = NULL;
 	uint32_t vlan_filter_bmp_size;
@@ -3506,7 +3506,7 @@
 	port_id = bond_alloc(dev, bonding_mode);
 	if (port_id < 0) {
 		RTE_BOND_LOG(ERR, "Failed to create socket %s in mode %u on "
-				"socket %u.",	name, bonding_mode, socket_id);
+				"socket %d.",	name, bonding_mode, socket_id);
 		goto parse_error;
 	}
 	internals = rte_eth_devices[port_id].data->dev_private;
@@ -3531,7 +3531,7 @@
 
 	rte_eth_dev_probing_finish(&rte_eth_devices[port_id]);
 	RTE_BOND_LOG(INFO, "Create bonded device %s on port %d in mode %u on "
-			"socket %u.",	name, port_id, bonding_mode, socket_id);
+			"socket %d.",	name, port_id, bonding_mode, socket_id);
 	return 0;
 
 parse_error:
diff -Nru dpdk-20.11.8/drivers/net/dpaa2/dpaa2_rxtx.c dpdk-20.11.9/drivers/net/dpaa2/dpaa2_rxtx.c
--- dpdk-20.11.8/drivers/net/dpaa2/dpaa2_rxtx.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/dpaa2/dpaa2_rxtx.c	2023-08-15 16:54:57.000000000 +0100
@@ -187,8 +187,12 @@
 
 	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
 		mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
-	else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
+	else
+		mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
 		mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+	else
+		mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
 
 	if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT |
 	    L3_IP_1_MORE_FRAGMENT |
@@ -230,8 +234,12 @@
 
 	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
 		mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
-	else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
+	else
+		mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
 		mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+	else
+		mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
 
 	if (dpaa2_enable_ts[mbuf->port]) {
 		*dpaa2_timestamp_dynfield(mbuf) = annotation->word2;
diff -Nru dpdk-20.11.8/drivers/net/e1000/em_ethdev.c dpdk-20.11.9/drivers/net/e1000/em_ethdev.c
--- dpdk-20.11.8/drivers/net/e1000/em_ethdev.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/e1000/em_ethdev.c	2023-08-15 16:54:57.000000000 +0100
@@ -1079,8 +1079,8 @@
 	 * To avoid it we support just one RX queue for now (no RSS).
 	 */
 
-	dev_info->max_rx_queues = 1;
-	dev_info->max_tx_queues = 1;
+	dev_info->max_rx_queues = 2;
+	dev_info->max_tx_queues = 2;
 
 	dev_info->rx_queue_offload_capa = em_get_rx_queue_offloads_capa(dev);
 	dev_info->rx_offload_capa = em_get_rx_port_offloads_capa(dev) |
diff -Nru dpdk-20.11.8/drivers/net/e1000/igb_rxtx.c dpdk-20.11.9/drivers/net/e1000/igb_rxtx.c
--- dpdk-20.11.8/drivers/net/e1000/igb_rxtx.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/e1000/igb_rxtx.c	2023-08-15 16:54:57.000000000 +0100
@@ -1872,6 +1872,7 @@
 		if (txq != NULL) {
 			igb_tx_queue_release_mbufs(txq);
 			igb_reset_tx_queue(txq, dev);
+			dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
 		}
 	}
 
@@ -1880,6 +1881,7 @@
 		if (rxq != NULL) {
 			igb_rx_queue_release_mbufs(rxq);
 			igb_reset_rx_queue(rxq);
+			dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
 		}
 	}
 }
@@ -2461,6 +2463,7 @@
 		rxdctl |= ((rxq->hthresh & 0x1F) << 8);
 		rxdctl |= ((rxq->wthresh & 0x1F) << 16);
 		E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
+		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
 	}
 
 	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
@@ -2625,6 +2628,7 @@
 		txdctl |= ((txq->wthresh & 0x1F) << 16);
 		txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
 		E1000_WRITE_REG(hw, E1000_TXDCTL(txq->reg_idx), txdctl);
+		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
 	}
 
 	/* Program the Transmit Control Register. */
diff -Nru dpdk-20.11.8/drivers/net/hns3/hns3_dcb.c dpdk-20.11.9/drivers/net/hns3/hns3_dcb.c
--- dpdk-20.11.8/drivers/net/hns3/hns3_dcb.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/hns3/hns3_dcb.c	2023-08-15 16:54:57.000000000 +0100
@@ -240,9 +240,9 @@
 static int
 hns3_dcb_ets_tc_dwrr_cfg(struct hns3_hw *hw)
 {
-#define DEFAULT_TC_WEIGHT	1
 #define DEFAULT_TC_OFFSET	14
 	struct hns3_ets_tc_weight_cmd *ets_weight;
+	struct hns3_pg_info *pg_info;
 	struct hns3_cmd_desc desc;
 	uint8_t i;
 
@@ -250,13 +250,6 @@
 	ets_weight = (struct hns3_ets_tc_weight_cmd *)desc.data;
 
 	for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
-		struct hns3_pg_info *pg_info;
-
-		ets_weight->tc_weight[i] = DEFAULT_TC_WEIGHT;
-
-		if (!(hw->hw_tc_map & BIT(i)))
-			continue;
-
 		pg_info = &hw->dcb_info.pg_info[hw->dcb_info.tc_info[i].pgid];
 		ets_weight->tc_weight[i] = pg_info->tc_dwrr[i];
 	}
diff -Nru dpdk-20.11.8/drivers/net/hns3/hns3_ethdev.c dpdk-20.11.9/drivers/net/hns3/hns3_ethdev.c
--- dpdk-20.11.8/drivers/net/hns3/hns3_ethdev.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/hns3/hns3_ethdev.c	2023-08-15 16:54:57.000000000 +0100
@@ -44,6 +44,7 @@
 #define HNS3_VECTOR0_IMP_CMDQ_ERR_B	4U
 #define HNS3_VECTOR0_IMP_RD_POISON_B	5U
 #define HNS3_VECTOR0_ALL_MSIX_ERR_B	6U
+#define HNS3_VECTOR0_TRIGGER_IMP_RESET_B	7U
 
 #define HNS3_RESET_WAIT_MS	100
 #define HNS3_RESET_WAIT_CNT	200
@@ -83,8 +84,7 @@
 			      RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
 			      RTE_ETH_FEC_MODE_CAPA_MASK(RS) },
 
-	{ ETH_SPEED_NUM_200G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
-			      RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
+	{ ETH_SPEED_NUM_200G, RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
 			      RTE_ETH_FEC_MODE_CAPA_MASK(RS) }
 };
 
@@ -227,6 +227,19 @@
 }
 
 static void
+hns3_delay_before_clear_event_cause(struct hns3_hw *hw, uint32_t event_type, uint32_t regclr)
+{
+#define IMPRESET_WAIT_MS_TIME	5
+
+	if (event_type == HNS3_VECTOR0_EVENT_RST &&
+	    regclr & BIT(HNS3_VECTOR0_IMPRESET_INT_B) &&
+	    hw->revision >= PCI_REVISION_ID_HIP09_A) {
+		rte_delay_ms(IMPRESET_WAIT_MS_TIME);
+		hns3_dbg(hw, "wait firmware watchdog initialization completed.");
+	}
+}
+
+static void
 hns3_interrupt_handler(void *param)
 {
 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
@@ -239,6 +252,7 @@
 	hns3_pf_disable_irq0(hw);
 
 	event_cause = hns3_check_event_cause(hns, &clearval);
+	hns3_delay_before_clear_event_cause(hw, event_cause, clearval);
 	hns3_clear_event_cause(hw, event_cause, clearval);
 	/* vector 0 interrupt is shared with reset and mailbox source events. */
 	if (event_cause == HNS3_VECTOR0_EVENT_ERR) {
@@ -2701,6 +2715,7 @@
 	struct rte_eth_link new_link;
 	int ret;
 
+	memset(&new_link, 0, sizeof(new_link));
 	/* When port is stopped, report link down. */
 	if (eth_dev->data->dev_started == 0) {
 		new_link.link_autoneg = mac->link_autoneg;
@@ -2716,7 +2731,6 @@
 		hns3_err(hw, "failed to get port link info, ret = %d.", ret);
 	}
 
-	memset(&new_link, 0, sizeof(new_link));
 	hns3_setup_linkstatus(eth_dev, &new_link);
 
 out:
@@ -4075,7 +4089,7 @@
 
 	if (cmdq_resp) {
 		PMD_INIT_LOG(ERR,
-			     "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
+			     "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.",
 			     cmdq_resp);
 		return -EIO;
 	}
@@ -5519,17 +5533,6 @@
 	return hns3_cmd_send(hw, &desc, 1);
 }
 
-static int
-hns3_imp_reset_cmd(struct hns3_hw *hw)
-{
-	struct hns3_cmd_desc desc;
-
-	hns3_cmd_setup_basic_desc(&desc, 0xFFFE, false);
-	desc.data[0] = 0xeedd;
-
-	return hns3_cmd_send(hw, &desc, 1);
-}
-
 static void
 hns3_msix_process(struct hns3_adapter *hns, enum hns3_reset_level reset_level)
 {
@@ -5547,7 +5550,9 @@
 
 	switch (reset_level) {
 	case HNS3_IMP_RESET:
-		hns3_imp_reset_cmd(hw);
+		val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG);
+		hns3_set_bit(val, HNS3_VECTOR0_TRIGGER_IMP_RESET_B, 1);
+		hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, val);
 		hns3_warn(hw, "IMP Reset requested time=%ld.%.6ld",
 			  tv.tv_sec, tv.tv_usec);
 		break;
@@ -6082,53 +6087,50 @@
 	return cur_capa;
 }
 
-static bool
-is_fec_mode_one_bit_set(uint32_t mode)
-{
-	int cnt = 0;
-	uint8_t i;
-
-	for (i = 0; i < sizeof(mode); i++)
-		if (mode >> i & 0x1)
-			cnt++;
-
-	return cnt == 1 ? true : false;
-}
-
 static int
-hns3_fec_set(struct rte_eth_dev *dev, uint32_t mode)
+hns3_fec_mode_valid(struct rte_eth_dev *dev, uint32_t mode)
 {
 #define FEC_CAPA_NUM 2
 	struct hns3_adapter *hns = dev->data->dev_private;
 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns);
-	struct hns3_pf *pf = &hns->pf;
-
 	struct rte_eth_fec_capa fec_capa[FEC_CAPA_NUM];
-	uint32_t cur_capa;
 	uint32_t num = FEC_CAPA_NUM;
+	uint32_t cur_capa;
 	int ret;
 
-	ret = hns3_fec_get_capability(dev, fec_capa, num);
-	if (ret < 0)
-		return ret;
-
-	/* HNS3 PMD only support one bit set mode, e.g. 0x1, 0x4 */
-	if (!is_fec_mode_one_bit_set(mode)) {
-		hns3_err(hw, "FEC mode(0x%x) not supported in HNS3 PMD, "
-			     "FEC mode should be only one bit set", mode);
+	if (__builtin_popcount(mode) != 1) {
+		hns3_err(hw, "FEC mode(0x%x) should be only one bit set", mode);
 		return -EINVAL;
 	}
 
+	ret = hns3_fec_get_capability(dev, fec_capa, num);
+	if (ret < 0)
+		return ret;
 	/*
 	 * Check whether the configured mode is within the FEC capability.
 	 * If not, the configured mode will not be supported.
 	 */
 	cur_capa = get_current_speed_fec_cap(hw, fec_capa);
-	if (!(cur_capa & mode)) {
-		hns3_err(hw, "unsupported FEC mode = 0x%x", mode);
+	if ((cur_capa & mode) == 0) {
+		hns3_err(hw, "unsupported FEC mode(0x%x)", mode);
 		return -EINVAL;
 	}
 
+	return 0;
+}
+
+static int
+hns3_fec_set(struct rte_eth_dev *dev, uint32_t mode)
+{
+	struct hns3_adapter *hns = dev->data->dev_private;
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns);
+	struct hns3_pf *pf = &hns->pf;
+	int ret;
+
+	ret = hns3_fec_mode_valid(dev, mode);
+	if (ret != 0)
+		return ret;
+
 	rte_spinlock_lock(&hw->lock);
 	ret = hns3_set_fec_hw(hw, mode);
 	if (ret) {
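
For context (not part of the attached debdiff): the hns3 FEC rework above replaces a hand-rolled bit counter with __builtin_popcount() to require that exactly one FEC mode bit is set in a request. The check in isolation:

    #include <stdint.h>
    #include <stdio.h>

    /* A FEC mode request is valid only if exactly one capability bit
     * is set. */
    static int
    fec_mode_is_single_bit(uint32_t mode)
    {
        return __builtin_popcount(mode) == 1;
    }

    int
    main(void)
    {
        printf("%d %d %d\n",
               fec_mode_is_single_bit(0x1),  /* 1: one bit set */
               fec_mode_is_single_bit(0x6),  /* 0: two bits set */
               fec_mode_is_single_bit(0x0)); /* 0: no bits set */
        return 0;
    }
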
diff -Nru dpdk-20.11.8/drivers/net/hns3/hns3_ethdev_vf.c dpdk-20.11.9/drivers/net/hns3/hns3_ethdev_vf.c
--- dpdk-20.11.8/drivers/net/hns3/hns3_ethdev_vf.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/hns3/hns3_ethdev_vf.c	2023-08-15 16:54:57.000000000 +0100
@@ -2090,8 +2090,10 @@
 		return ret;
 
 	ret = hns3_init_queues(hns, reset_queue);
-	if (ret)
+	if (ret) {
 		hns3_err(hw, "failed to init queues, ret = %d.", ret);
+		return ret;
+	}
 
 	return hns3_restore_filter(hns);
 }
diff -Nru dpdk-20.11.8/drivers/net/hns3/hns3_flow.c dpdk-20.11.9/drivers/net/hns3/hns3_flow.c
--- dpdk-20.11.8/drivers/net/hns3/hns3_flow.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/hns3/hns3_flow.c	2023-08-15 16:54:57.000000000 +0100
@@ -1934,8 +1934,9 @@
 	if (ret != 0)
 		return ret;
 
-	hns3_info(hw, "RSS tuple fields changed from 0x%" PRIx64 " to 0x%" PRIx64,
-		  old_tuple_fields, new_tuple_fields);
+	if (!cfg_global_tuple)
+		hns3_info(hw, "RSS tuple fields changed from 0x%" PRIx64 " to 0x%" PRIx64,
+			  old_tuple_fields, new_tuple_fields);
 
 	return 0;
 }
diff -Nru dpdk-20.11.8/drivers/net/hns3/hns3_regs.c dpdk-20.11.9/drivers/net/hns3/hns3_regs.c
--- dpdk-20.11.8/drivers/net/hns3/hns3_regs.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/hns3/hns3_regs.c	2023-08-15 16:54:57.000000000 +0100
@@ -270,8 +270,9 @@
 	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
 	uint32_t *origin_data_ptr = data;
 	uint32_t reg_offset;
-	int reg_num;
-	int i, j;
+	size_t reg_num;
+	uint16_t j;
+	size_t i;
 
 	/* fetching per-PF registers values from PF PCIe register space */
 	reg_num = sizeof(cmdq_reg_addrs) / sizeof(uint32_t);
diff -Nru dpdk-20.11.8/drivers/net/hns3/hns3_rxtx.c dpdk-20.11.9/drivers/net/hns3/hns3_rxtx.c
--- dpdk-20.11.8/drivers/net/hns3/hns3_rxtx.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/hns3/hns3_rxtx.c	2023-08-15 16:54:57.000000000 +0100
@@ -54,6 +54,8 @@
 				rxq->sw_ring[i].mbuf = NULL;
 			}
 		}
+		for (i = 0; i < rxq->rx_rearm_nb; i++)
+			rxq->sw_ring[rxq->rx_rearm_start + i].mbuf = NULL;
 	}
 
 	for (i = 0; i < rxq->bulk_mbuf_num; i++)
@@ -579,7 +581,7 @@
 
 	ret = hns3_cmd_send(hw, &desc, 1);
 	if (ret)
-		hns3_err(hw, "TQP enable fail, ret = %d", ret);
+		hns3_err(hw, "TQP %s fail, ret = %d", enable ? "enable" : "disable", ret);
 
 	return ret;
 }
@@ -1636,7 +1638,7 @@
 
 	ret = hns3_fake_tx_queue_config(hw, tx_need_add_nb_q);
 	if (ret) {
-		hns3_err(hw, "Fail to configure fake rx queues: %d", ret);
+		hns3_err(hw, "Fail to configure fake tx queues: %d", ret);
 		goto cfg_fake_tx_q_fail;
 	}
 
@@ -3997,6 +3999,13 @@
 		return -ENOTSUP;
 
 	rte_spinlock_lock(&hw->lock);
+
+	if (rte_atomic16_read(&hw->reset.resetting)) {
+		hns3_err(hw, "fail to start Rx queue during resetting.");
+		rte_spinlock_unlock(&hw->lock);
+		return -EIO;
+	}
+
 	ret = hns3_reset_queue(hw, rx_queue_id, HNS3_RING_TYPE_RX);
 	if (ret) {
 		hns3_err(hw, "fail to reset Rx queue %u, ret = %d.",
@@ -4005,6 +4014,9 @@
 		return ret;
 	}
 
+	if (rxq->sw_ring[0].mbuf != NULL)
+		hns3_rx_queue_release_mbufs(rxq);
+
 	ret = hns3_init_rxq(hns, rx_queue_id);
 	if (ret) {
 		hns3_err(hw, "fail to init Rx queue %u, ret = %d.",
@@ -4043,6 +4055,13 @@
 		return -ENOTSUP;
 
 	rte_spinlock_lock(&hw->lock);
+
+	if (rte_atomic16_read(&hw->reset.resetting)) {
+		hns3_err(hw, "fail to stop Rx queue during resetting.");
+		rte_spinlock_unlock(&hw->lock);
+		return -EIO;
+	}
+
 	hns3_enable_rxq(rxq, false);
 
 	hns3_rx_queue_release_mbufs(rxq);
@@ -4065,6 +4084,13 @@
 		return -ENOTSUP;
 
 	rte_spinlock_lock(&hw->lock);
+
+	if (rte_atomic16_read(&hw->reset.resetting)) {
+		hns3_err(hw, "fail to start Tx queue during resetting.");
+		rte_spinlock_unlock(&hw->lock);
+		return -EIO;
+	}
+
 	ret = hns3_reset_queue(hw, tx_queue_id, HNS3_RING_TYPE_TX);
 	if (ret) {
 		hns3_err(hw, "fail to reset Tx queue %u, ret = %d.",
@@ -4091,6 +4117,13 @@
 		return -ENOTSUP;
 
 	rte_spinlock_lock(&hw->lock);
+
+	if (rte_atomic16_read(&hw->reset.resetting)) {
+		hns3_err(hw, "fail to stop Tx queue during resetting.");
+		rte_spinlock_unlock(&hw->lock);
+		return -EIO;
+	}
+
 	hns3_enable_txq(txq, false);
 	hns3_tx_queue_release_mbufs(txq);
 	/*
diff -Nru dpdk-20.11.8/drivers/net/hns3/hns3_rxtx_vec_neon.h dpdk-20.11.9/drivers/net/hns3/hns3_rxtx_vec_neon.h
--- dpdk-20.11.8/drivers/net/hns3/hns3_rxtx_vec_neon.h	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/hns3/hns3_rxtx_vec_neon.h	2023-08-15 16:54:57.000000000 +0100
@@ -138,8 +138,8 @@
 	/* mask to shuffle from desc to mbuf's rx_descriptor_fields1 */
 	uint8x16_t shuf_desc_fields_msk = {
 		0xff, 0xff, 0xff, 0xff,  /* packet type init zero */
-		22, 23, 0xff, 0xff,      /* rx.pkt_len to rte_mbuf.pkt_len */
-		20, 21,	                 /* size to rte_mbuf.data_len */
+		20, 21, 0xff, 0xff,      /* rx.pkt_len to rte_mbuf.pkt_len */
+		22, 23,	                 /* size to rte_mbuf.data_len */
 		0xff, 0xff,	         /* rte_mbuf.vlan_tci init zero */
 		8, 9, 10, 11,	         /* rx.rss_hash to rte_mbuf.hash.rss */
 	};
diff -Nru dpdk-20.11.8/drivers/net/hns3/hns3_stats.c dpdk-20.11.9/drivers/net/hns3/hns3_stats.c
--- dpdk-20.11.8/drivers/net/hns3/hns3_stats.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/hns3/hns3_stats.c	2023-08-15 16:54:57.000000000 +0100
@@ -338,7 +338,7 @@
 	uint32_t stats_iterms;
 	uint64_t *desc_data;
 	uint32_t desc_num;
-	uint16_t i;
+	uint32_t i;
 	int ret;
 
 	/* The first desc has a 64-bit header, so need to consider it. */
diff -Nru dpdk-20.11.8/drivers/net/i40e/i40e_rxtx.c dpdk-20.11.9/drivers/net/i40e/i40e_rxtx.c
--- dpdk-20.11.8/drivers/net/i40e/i40e_rxtx.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/i40e/i40e_rxtx.c	2023-08-15 16:54:57.000000000 +0100
@@ -265,10 +265,7 @@
 			union i40e_tx_offload tx_offload)
 {
 	/* Set MACLEN */
-	if (ol_flags & PKT_TX_TUNNEL_MASK)
-		*td_offset |= (tx_offload.outer_l2_len >> 1)
-				<< I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
-	else
+	if (!(ol_flags & PKT_TX_TUNNEL_MASK))
 		*td_offset |= (tx_offload.l2_len >> 1)
 			<< I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
 
@@ -1131,9 +1128,12 @@
 
 		/* Fill in tunneling parameters if necessary */
 		cd_tunneling_params = 0;
-		if (ol_flags & PKT_TX_TUNNEL_MASK)
+		if (ol_flags & PKT_TX_TUNNEL_MASK) {
+			td_offset |= (tx_offload.outer_l2_len >> 1)
+					<< I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
 			i40e_parse_tunneling_params(ol_flags, tx_offload,
 						    &cd_tunneling_params);
+		}
 		/* Enable checksum offloading */
 		if (ol_flags & I40E_TX_CKSUM_OFFLOAD_MASK)
 			i40e_txd_enable_checksum(ol_flags, &td_cmd,
@@ -2818,6 +2818,8 @@
 		rxq->rx_hdr_len = 0;
 		rxq->rx_buf_len = RTE_ALIGN_FLOOR(buf_size,
 			(1 << I40E_RXQ_CTX_DBUFF_SHIFT));
+		rxq->rx_buf_len = RTE_MIN(rxq->rx_buf_len,
+					  I40E_RX_MAX_DATA_BUF_SIZE);
 		rxq->hs_mode = i40e_header_split_none;
 		break;
 	}
diff -Nru dpdk-20.11.8/drivers/net/i40e/i40e_rxtx.h dpdk-20.11.9/drivers/net/i40e/i40e_rxtx.h
--- dpdk-20.11.8/drivers/net/i40e/i40e_rxtx.h	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/i40e/i40e_rxtx.h	2023-08-15 16:54:57.000000000 +0100
@@ -21,6 +21,9 @@
 /* In none-PXE mode QLEN must be whole number of 32 descriptors. */
 #define	I40E_ALIGN_RING_DESC	32
 
+/* Max data buffer size must be 16K - 128 bytes */
+#define I40E_RX_MAX_DATA_BUF_SIZE	(16 * 1024 - 128)
+
 #define	I40E_MIN_RING_DESC	64
 #define	I40E_MAX_RING_DESC	4096
 
@@ -162,7 +165,7 @@
 	bool q_set; /**< indicate if tx queue has been configured */
 	bool tx_deferred_start; /**< don't start this queue in dev start */
 	uint8_t dcb_tc;         /**< Traffic class of tx queue */
-	uint64_t offloads; /**< Tx offload flags of DEV_RX_OFFLOAD_* */
+	uint64_t offloads; /**< Tx offload flags of DEV_TX_OFFLOAD_* */
 	const struct rte_memzone *mz;
 };
 
diff -Nru dpdk-20.11.8/drivers/net/i40e/i40e_rxtx_vec_altivec.c dpdk-20.11.9/drivers/net/i40e/i40e_rxtx_vec_altivec.c
--- dpdk-20.11.8/drivers/net/i40e/i40e_rxtx_vec_altivec.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/i40e/i40e_rxtx_vec_altivec.c	2023-08-15 16:54:57.000000000 +0100
@@ -448,8 +448,6 @@
 
  /* Notice:
   * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
-  * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
-  *   numbers of DD bits
   */
 uint16_t
 i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
diff -Nru dpdk-20.11.8/drivers/net/i40e/i40e_rxtx_vec_neon.c dpdk-20.11.9/drivers/net/i40e/i40e_rxtx_vec_neon.c
--- dpdk-20.11.8/drivers/net/i40e/i40e_rxtx_vec_neon.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/i40e/i40e_rxtx_vec_neon.c	2023-08-15 16:54:57.000000000 +0100
@@ -447,8 +447,6 @@
  /*
  * Notice:
  * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
- * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
- *   numbers of DD bits
  */
 uint16_t
 i40e_recv_pkts_vec(void *__rte_restrict rx_queue,
diff -Nru dpdk-20.11.8/drivers/net/i40e/i40e_rxtx_vec_sse.c dpdk-20.11.9/drivers/net/i40e/i40e_rxtx_vec_sse.c
--- dpdk-20.11.8/drivers/net/i40e/i40e_rxtx_vec_sse.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/i40e/i40e_rxtx_vec_sse.c	2023-08-15 16:54:57.000000000 +0100
@@ -595,8 +595,6 @@
  /*
  * Notice:
  * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
- * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
- *   numbers of DD bits
  */
 uint16_t
 i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
diff -Nru dpdk-20.11.8/drivers/net/iavf/iavf_ethdev.c dpdk-20.11.9/drivers/net/iavf/iavf_ethdev.c
--- dpdk-20.11.8/drivers/net/iavf/iavf_ethdev.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/iavf/iavf_ethdev.c	2023-08-15 16:54:57.000000000 +0100
@@ -756,8 +756,6 @@
 	if (adapter->stopped == 1)
 		return 0;
 
-	iavf_stop_queues(dev);
-
 	/* Disable the interrupt for Rx */
 	rte_intr_efd_disable(intr_handle);
 	/* Rx interrupt vector mapping free */
@@ -773,6 +771,8 @@
 	iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
 				  false);
 
+	iavf_stop_queues(dev);
+
 	adapter->stopped = 1;
 	dev->data->dev_started = 0;
 
@@ -2011,6 +2011,9 @@
 	adapter->dev_data = eth_dev->data;
 	adapter->stopped = 1;
 
+	if (iavf_dev_event_handler_init())
+		return 0;
+
 	if (iavf_init_vf(eth_dev) != 0) {
 		PMD_INIT_LOG(ERR, "Init vf failed");
 		return -1;
@@ -2037,8 +2040,6 @@
 	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
 			&eth_dev->data->mac_addrs[0]);
 
-	if (iavf_dev_event_handler_init())
-		return 0;
 
 	/* register callback func to eal lib */
 	rte_intr_callback_register(&pci_dev->intr_handle,
@@ -2076,6 +2077,18 @@
 
 	ret = iavf_dev_stop(dev);
 
+	/*
+	 * Release redundant queue resources when closing the dev
+	 * so that other VFs can re-use the queues.
+	 */
+	if (vf->lv_enabled) {
+		ret = iavf_request_queues(dev, IAVF_MAX_NUM_QUEUES_DFLT);
+		if (ret)
+			PMD_DRV_LOG(ERR, "Reset the num of queues failed");
+
+		vf->max_rss_qregion = IAVF_MAX_NUM_QUEUES_DFLT;
+	}
+
 	iavf_flow_flush(dev, NULL);
 	iavf_flow_uninit(adapter);
 
diff -Nru dpdk-20.11.8/drivers/net/iavf/iavf_rxtx.c dpdk-20.11.9/drivers/net/iavf/iavf_rxtx.c
--- dpdk-20.11.8/drivers/net/iavf/iavf_rxtx.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/iavf/iavf_rxtx.c	2023-08-15 16:54:57.000000000 +0100
@@ -583,6 +583,7 @@
 
 	len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
 	rxq->rx_buf_len = RTE_ALIGN_FLOOR(len, (1 << IAVF_RXQ_CTX_DBUFF_SHIFT));
+	rxq->rx_buf_len = RTE_MIN(rxq->rx_buf_len, IAVF_RX_MAX_DATA_BUF_SIZE);
 
 	/* Allocate the software ring. */
 	len = nb_desc + IAVF_RX_MAX_BURST;
diff -Nru dpdk-20.11.8/drivers/net/iavf/iavf_rxtx.h dpdk-20.11.9/drivers/net/iavf/iavf_rxtx.h
--- dpdk-20.11.8/drivers/net/iavf/iavf_rxtx.h	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/iavf/iavf_rxtx.h	2023-08-15 16:54:57.000000000 +0100
@@ -16,6 +16,9 @@
 /* used for Rx Bulk Allocate */
 #define IAVF_RX_MAX_BURST         32
 
+/* Max data buffer size must be 16K - 128 bytes */
+#define IAVF_RX_MAX_DATA_BUF_SIZE (16 * 1024 - 128)
+
 /* used for Vector PMD */
 #define IAVF_VPMD_RX_MAX_BURST    32
 #define IAVF_VPMD_TX_MAX_BURST    32
diff -Nru dpdk-20.11.8/drivers/net/iavf/iavf_vchnl.c dpdk-20.11.9/drivers/net/iavf/iavf_vchnl.c
--- dpdk-20.11.8/drivers/net/iavf/iavf_vchnl.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/iavf/iavf_vchnl.c	2023-08-15 16:54:57.000000000 +0100
@@ -254,6 +254,7 @@
 				vf->link_speed = iavf_convert_link_speed(speed);
 			}
 			iavf_dev_link_update(vf->eth_dev, 0);
+			iavf_dev_event_post(vf->eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL, 0);
 			PMD_DRV_LOG(INFO, "Link status update:%s",
 					vf->link_up ? "up" : "down");
 			break;
@@ -308,6 +309,7 @@
 
 	switch (args->ops) {
 	case VIRTCHNL_OP_RESET_VF:
+	case VIRTCHNL_OP_REQUEST_QUEUES:
 		/*no need to wait for response */
 		_clear_cmd(vf);
 		break;
@@ -330,56 +332,49 @@
 		}
 		_clear_cmd(vf);
 		break;
-	case VIRTCHNL_OP_REQUEST_QUEUES:
-		/*
-		 * ignore async reply, only wait for system message,
-		 * vf_reset = true if get VIRTCHNL_EVENT_RESET_IMPENDING,
-		 * if not, means request queues failed.
-		 */
-		do {
-			result = iavf_read_msg_from_pf(adapter, args->out_size,
-						   args->out_buffer);
-			if (result == IAVF_MSG_SYS && vf->vf_reset) {
-				break;
-			} else if (result == IAVF_MSG_CMD ||
-				result == IAVF_MSG_ERR) {
+	default:
+		if (rte_thread_is_intr()) {
+			/* For virtchnl ops executed in the eal_intr_thread,
+			 * we need to poll the response.
+			 */
+			do {
+				result = iavf_read_msg_from_pf(adapter, args->out_size,
+							args->out_buffer);
+				if (result == IAVF_MSG_CMD)
+					break;
+				iavf_msec_delay(ASQ_DELAY_MS);
+			} while (i++ < MAX_TRY_TIMES);
+			if (i >= MAX_TRY_TIMES ||
+				vf->cmd_retval != VIRTCHNL_STATUS_SUCCESS) {
 				err = -1;
-				break;
+				PMD_DRV_LOG(ERR, "No response or return failure (%d)"
+						" for cmd %d", vf->cmd_retval, args->ops);
 			}
-			iavf_msec_delay(ASQ_DELAY_MS);
-			/* If don't read msg or read sys event, continue */
-		} while (i++ < MAX_TRY_TIMES);
-		if (i >= MAX_TRY_TIMES ||
-			vf->cmd_retval != VIRTCHNL_STATUS_SUCCESS) {
-			err = -1;
-			PMD_DRV_LOG(ERR, "No response or return failure (%d)"
-				    " for cmd %d", vf->cmd_retval, args->ops);
-		}
-		_clear_cmd(vf);
-		break;
-	default:
-		/* For other virtchnl ops in running time,
-		 * wait for the cmd done flag.
-		 */
-		do {
-			if (vf->pend_cmd == VIRTCHNL_OP_UNKNOWN)
-				break;
-			iavf_msec_delay(ASQ_DELAY_MS);
-			/* If don't read msg or read sys event, continue */
-		} while (i++ < MAX_TRY_TIMES);
-
-		if (i >= MAX_TRY_TIMES) {
-			PMD_DRV_LOG(ERR, "No response for cmd %d", args->ops);
 			_clear_cmd(vf);
-			err = -EIO;
-		} else if (vf->cmd_retval ==
-			   VIRTCHNL_STATUS_ERR_NOT_SUPPORTED) {
-			PMD_DRV_LOG(ERR, "Cmd %d not supported", args->ops);
-			err = -ENOTSUP;
-		} else if (vf->cmd_retval != VIRTCHNL_STATUS_SUCCESS) {
-			PMD_DRV_LOG(ERR, "Return failure %d for cmd %d",
-				    vf->cmd_retval, args->ops);
-			err = -EINVAL;
+		} else {
+			/* For other virtchnl ops in running time,
+			 * wait for the cmd done flag.
+			 */
+			do {
+				if (vf->pend_cmd == VIRTCHNL_OP_UNKNOWN)
+					break;
+				iavf_msec_delay(ASQ_DELAY_MS);
+				/* If no msg was read or a sys event was read, continue */
+			} while (i++ < MAX_TRY_TIMES);
+
+			if (i >= MAX_TRY_TIMES) {
+				PMD_DRV_LOG(ERR, "No response for cmd %d", args->ops);
+				_clear_cmd(vf);
+				err = -EIO;
+			} else if (vf->cmd_retval ==
+				VIRTCHNL_STATUS_ERR_NOT_SUPPORTED) {
+				PMD_DRV_LOG(ERR, "Cmd %d not supported", args->ops);
+				err = -ENOTSUP;
+			} else if (vf->cmd_retval != VIRTCHNL_STATUS_SUCCESS) {
+				PMD_DRV_LOG(ERR, "Return failure %d for cmd %d",
+						vf->cmd_retval, args->ops);
+				err = -EINVAL;
+			}
 		}
 		break;
 	}
@@ -393,8 +388,14 @@
 {
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
 	int ret;
+	int is_intr_thread = rte_thread_is_intr();
 
-	rte_spinlock_lock(&vf->aq_lock);
+	if (is_intr_thread) {
+		if (!rte_spinlock_trylock(&vf->aq_lock))
+			return -EIO;
+	} else {
+		rte_spinlock_lock(&vf->aq_lock);
+	}
 	ret = iavf_execute_vf_cmd(adapter, args);
 	rte_spinlock_unlock(&vf->aq_lock);
 
@@ -1567,11 +1568,11 @@
 	struct iavf_adapter *adapter =
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct iavf_info *vf =  IAVF_DEV_PRIVATE_TO_VF(adapter);
-	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct virtchnl_vf_res_request vfres;
 	struct iavf_cmd_info args;
 	uint16_t num_queue_pairs;
 	int err;
+	int i = 0;
 
 	if (!(vf->vf_res->vf_cap_flags &
 		VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)) {
@@ -1591,22 +1592,19 @@
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	/*
-	 * disable interrupt to avoid the admin queue message to be read
-	 * before iavf_read_msg_from_pf.
-	 *
-	 * don't disable interrupt handler until ready to execute vf cmd.
-	 */
-	rte_spinlock_lock(&vf->aq_lock);
-	rte_intr_disable(&pci_dev->intr_handle);
-	err = iavf_execute_vf_cmd(adapter, &args);
-	rte_intr_enable(&pci_dev->intr_handle);
-	rte_spinlock_unlock(&vf->aq_lock);
+	err = iavf_execute_vf_cmd_safe(adapter, &args);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command OP_REQUEST_QUEUES");
 		return err;
 	}
 
+	/* wait for the interrupt notification that the vf is resetting */
+	while (i++ < MAX_TRY_TIMES) {
+		if (vf->vf_reset)
+			break;
+		iavf_msec_delay(ASQ_DELAY_MS);
+	}
+
 	/* request queues succeeded, vf is resetting */
 	if (vf->vf_reset) {
 		PMD_DRV_LOG(INFO, "vf is resetting");
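
Because the VIRTCHNL link-status handler above now posts RTE_ETH_EVENT_INTR_LSC, an application that wants to observe it hooks the standard ethdev event callback. A minimal sketch, where `link_status_cb` and `register_lsc_handler` are invented names; delivery is typically paired with intr_conf.lsc = 1 in the port configuration:

#include <stdio.h>
#include <rte_ethdev.h>

/* Illustrative application-side handler for link-status-change events. */
static int
link_status_cb(uint16_t port_id, enum rte_eth_event_type type,
	       void *cb_arg, void *ret_param)
{
	RTE_SET_USED(cb_arg);
	RTE_SET_USED(ret_param);
	printf("port %u: event %d (link status changed)\n", port_id, type);
	return 0;
}

static void
register_lsc_handler(uint16_t port_id)
{
	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
				      link_status_cb, NULL);
}
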
diff -Nru dpdk-20.11.8/drivers/net/ice/base/ice_sched.c dpdk-20.11.9/drivers/net/ice/base/ice_sched.c
--- dpdk-20.11.8/drivers/net/ice/base/ice_sched.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/ice/base/ice_sched.c	2023-08-15 16:54:57.000000000 +0100
@@ -1370,11 +1370,6 @@
 	clk_src = (val & GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M) >>
 		GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_S;
 
-#define PSM_CLK_SRC_367_MHZ 0x0
-#define PSM_CLK_SRC_416_MHZ 0x1
-#define PSM_CLK_SRC_446_MHZ 0x2
-#define PSM_CLK_SRC_390_MHZ 0x3
-
 	switch (clk_src) {
 	case PSM_CLK_SRC_367_MHZ:
 		hw->psm_clk_freq = ICE_PSM_CLK_367MHZ_IN_HZ;
@@ -1388,11 +1383,12 @@
 	case PSM_CLK_SRC_390_MHZ:
 		hw->psm_clk_freq = ICE_PSM_CLK_390MHZ_IN_HZ;
 		break;
-	default:
-		ice_debug(hw, ICE_DBG_SCHED, "PSM clk_src unexpected %u\n",
-			  clk_src);
-		/* fall back to a safe default */
-		hw->psm_clk_freq = ICE_PSM_CLK_446MHZ_IN_HZ;
+
+	/* A default case is not required, as clk_src is restricted
+	 * to a 2-bit value by the GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M mask.
+	 * The case labels above therefore cover all possible values
+	 * of this variable.
+	 */
 	}
 }
 
diff -Nru dpdk-20.11.8/drivers/net/ice/base/ice_sched.h dpdk-20.11.9/drivers/net/ice/base/ice_sched.h
--- dpdk-20.11.8/drivers/net/ice/base/ice_sched.h	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/ice/base/ice_sched.h	2023-08-15 16:54:57.000000000 +0100
@@ -35,6 +35,11 @@
 #define ICE_PSM_CLK_446MHZ_IN_HZ 446428571
 #define ICE_PSM_CLK_390MHZ_IN_HZ 390625000
 
+#define PSM_CLK_SRC_367_MHZ 0x0
+#define PSM_CLK_SRC_416_MHZ 0x1
+#define PSM_CLK_SRC_446_MHZ 0x2
+#define PSM_CLK_SRC_390_MHZ 0x3
+
 struct rl_profile_params {
 	u32 bw;			/* in Kbps */
 	u16 rl_multiplier;
diff -Nru dpdk-20.11.8/drivers/net/ice/ice_dcf.c dpdk-20.11.9/drivers/net/ice/ice_dcf.c
--- dpdk-20.11.8/drivers/net/ice/ice_dcf.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/ice/ice_dcf.c	2023-08-15 16:54:57.000000000 +0100
@@ -32,6 +32,8 @@
 #define ICE_DCF_ARQ_MAX_RETRIES 200
 #define ICE_DCF_ARQ_CHECK_TIME  2   /* msecs */
 
+#define ICE_DCF_CHECK_INTERVAL  100   /* 100ms */
+
 #define ICE_DCF_VF_RES_BUF_SZ	\
 	(sizeof(struct virtchnl_vf_resource) +	\
 		IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource))
@@ -609,6 +611,8 @@
 	rte_spinlock_init(&hw->vc_cmd_queue_lock);
 	TAILQ_INIT(&hw->vc_cmd_queue);
 
+	__atomic_store_n(&hw->vsi_update_thread_num, 0, __ATOMIC_RELAXED);
+
 	hw->arq_buf = rte_zmalloc("arq_buf", ICE_DCF_AQ_BUF_SZ, 0);
 	if (hw->arq_buf == NULL) {
 		PMD_INIT_LOG(ERR, "unable to allocate AdminQ buffer memory");
@@ -710,6 +714,11 @@
 	rte_intr_callback_unregister(intr_handle,
 				     ice_dcf_dev_interrupt_handler, hw);
 
+	/* Wait for all `ice-thread` threads to exit. */
+	while (__atomic_load_n(&hw->vsi_update_thread_num,
+		__ATOMIC_ACQUIRE) != 0)
+		rte_delay_ms(ICE_DCF_CHECK_INTERVAL);
+
 	ice_dcf_mode_disable(hw);
 	iavf_shutdown_adminq(&hw->avf);
 
@@ -789,7 +798,8 @@
 {
 	struct rte_eth_dev *dev = hw->eth_dev;
 	struct rte_eth_rss_conf *rss_conf;
-	uint8_t i, j, nb_q;
+	uint8_t j, nb_q;
+	size_t i;
 	int ret;
 
 	rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
diff -Nru dpdk-20.11.8/drivers/net/ice/ice_dcf_ethdev.c dpdk-20.11.9/drivers/net/ice/ice_dcf_ethdev.c
--- dpdk-20.11.8/drivers/net/ice/ice_dcf_ethdev.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/ice/ice_dcf_ethdev.c	2023-08-15 16:54:57.000000000 +0100
@@ -52,7 +52,8 @@
 
 	buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
 	rxq->rx_hdr_len = 0;
-	rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
+	rxq->rx_buf_len = RTE_ALIGN_FLOOR(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
+	rxq->rx_buf_len = RTE_MIN(rxq->rx_buf_len, ICE_RX_MAX_DATA_BUF_SIZE);
 	max_pkt_len = RTE_MIN((uint32_t)
 			      ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
 			      dev->data->dev_conf.rxmode.max_rx_pkt_len);
diff -Nru dpdk-20.11.8/drivers/net/ice/ice_dcf.h dpdk-20.11.9/drivers/net/ice/ice_dcf.h
--- dpdk-20.11.8/drivers/net/ice/ice_dcf.h	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/ice/ice_dcf.h	2023-08-15 16:54:57.000000000 +0100
@@ -39,6 +39,8 @@
 	void (*vc_event_msg_cb)(struct ice_dcf_hw *dcf_hw,
 				uint8_t *msg, uint16_t msglen);
 
+	int vsi_update_thread_num;
+
 	uint8_t *arq_buf;
 
 	uint16_t num_vfs;
diff -Nru dpdk-20.11.8/drivers/net/ice/ice_dcf_parent.c dpdk-20.11.9/drivers/net/ice/ice_dcf_parent.c
--- dpdk-20.11.8/drivers/net/ice/ice_dcf_parent.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/ice/ice_dcf_parent.c	2023-08-15 16:54:57.000000000 +0100
@@ -115,6 +115,9 @@
 		container_of(hw, struct ice_dcf_adapter, real_hw);
 	struct ice_adapter *parent_adapter = &adapter->parent;
 
+	__atomic_fetch_add(&hw->vsi_update_thread_num, 1,
+		__ATOMIC_RELAXED);
+
 	pthread_detach(pthread_self());
 	usleep(ICE_DCF_VSI_UPDATE_SERVICE_INTERVAL);
 
@@ -133,6 +136,9 @@
 
 	rte_spinlock_unlock(&vsi_update_lock);
 
+	__atomic_fetch_sub(&hw->vsi_update_thread_num, 1,
+		__ATOMIC_RELEASE);
+
 	return NULL;
 }
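
The counter wrapped around the VSI-update thread above is a small lifetime-tracking idiom: each detached worker registers itself on entry and deregisters with release semantics on exit, while teardown spins on an acquire load until no worker remains. A minimal sketch under those assumptions (names and the 100 ms poll period are illustrative only):

#include <pthread.h>
#include <rte_cycles.h>

static int worker_num;	/* how many detached workers are still alive */

static void *
worker_fn(void *arg)
{
	__atomic_fetch_add(&worker_num, 1, __ATOMIC_RELAXED);
	pthread_detach(pthread_self());

	/* ... perform the asynchronous update work here ... */

	__atomic_fetch_sub(&worker_num, 1, __ATOMIC_RELEASE);
	return arg;
}

static void
wait_for_workers(void)
{
	/* Pairs with the release above: once this reads zero, the
	 * workers' side effects are visible and teardown may proceed. */
	while (__atomic_load_n(&worker_num, __ATOMIC_ACQUIRE) != 0)
		rte_delay_ms(100);
}
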
 
diff -Nru dpdk-20.11.8/drivers/net/ice/ice_ethdev.c dpdk-20.11.9/drivers/net/ice/ice_ethdev.c
--- dpdk-20.11.8/drivers/net/ice/ice_ethdev.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/ice/ice_ethdev.c	2023-08-15 16:54:57.000000000 +0100
@@ -2270,6 +2270,9 @@
 
 	pf->supported_rxdid = ice_get_supported_rxdid(hw);
 
+	/* reset all stats of the device, including pf and main vsi */
+	ice_stats_reset(dev);
+
 	return 0;
 
 err_flow_init:
@@ -3223,7 +3226,8 @@
 
 	rss_conf = &dev_data->dev_conf.rx_adv_conf.rss_conf;
 	nb_q = dev_data->nb_rx_queues;
-	vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE;
+	vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE +
+			    ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE;
 	vsi->rss_lut_size = pf->hash_lut_size;
 
 	if (nb_q == 0) {
@@ -3264,7 +3268,10 @@
 				   vsi->rss_key_size));
 
 	rte_memcpy(key.standard_rss_key, vsi->rss_key,
-		RTE_MIN(sizeof(key.standard_rss_key), vsi->rss_key_size));
+		ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE);
+	rte_memcpy(key.extended_hash_key,
+		&vsi->rss_key[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE],
+		ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE);
 	ret = ice_aq_set_rss_key(hw, vsi->idx, &key);
 	if (ret)
 		goto out;
diff -Nru dpdk-20.11.8/drivers/net/ice/ice_rxtx.c dpdk-20.11.9/drivers/net/ice/ice_rxtx.c
--- dpdk-20.11.8/drivers/net/ice/ice_rxtx.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/ice/ice_rxtx.c	2023-08-15 16:54:57.000000000 +0100
@@ -248,7 +248,8 @@
 	buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
 			      RTE_PKTMBUF_HEADROOM);
 	rxq->rx_hdr_len = 0;
-	rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
+	rxq->rx_buf_len = RTE_ALIGN_FLOOR(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
+	rxq->rx_buf_len = RTE_MIN(rxq->rx_buf_len, ICE_RX_MAX_DATA_BUF_SIZE);
 	rxq->max_pkt_len = RTE_MIN((uint32_t)
 				   ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
 				   dev_data->dev_conf.rxmode.max_rx_pkt_len);
@@ -2343,7 +2344,8 @@
 	 * Shall be set only if L4TUNT = 01b and EIPT is not zero
 	 */
 	if (!(*cd_tunneling & ICE_TX_CTX_EIPT_NONE) &&
-	    (*cd_tunneling & ICE_TXD_CTX_UDP_TUNNELING))
+		(*cd_tunneling & ICE_TXD_CTX_UDP_TUNNELING) &&
+		(ol_flags & PKT_TX_OUTER_UDP_CKSUM))
 		*cd_tunneling |= ICE_TXD_CTX_QW0_L4T_CS_M;
 }
 
@@ -2354,10 +2356,7 @@
 			union ice_tx_offload tx_offload)
 {
 	/* Set MACLEN */
-	if (ol_flags & PKT_TX_TUNNEL_MASK)
-		*td_offset |= (tx_offload.outer_l2_len >> 1)
-			<< ICE_TX_DESC_LEN_MACLEN_S;
-	else
+	if (!(ol_flags & PKT_TX_TUNNEL_MASK))
 		*td_offset |= (tx_offload.l2_len >> 1)
 			<< ICE_TX_DESC_LEN_MACLEN_S;
 
@@ -2617,9 +2616,12 @@
 
 		/* Fill in tunneling parameters if necessary */
 		cd_tunneling_params = 0;
-		if (ol_flags & PKT_TX_TUNNEL_MASK)
+		if (ol_flags & PKT_TX_TUNNEL_MASK) {
+			td_offset |= (tx_offload.outer_l2_len >> 1)
+				<< ICE_TX_DESC_LEN_MACLEN_S;
 			ice_parse_tunneling_params(ol_flags, tx_offload,
 						   &cd_tunneling_params);
+		}
 
 		/* Enable checksum offloading */
 		if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK)
diff -Nru dpdk-20.11.8/drivers/net/ice/ice_rxtx.h dpdk-20.11.9/drivers/net/ice/ice_rxtx.h
--- dpdk-20.11.8/drivers/net/ice/ice_rxtx.h	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/ice/ice_rxtx.h	2023-08-15 16:54:57.000000000 +0100
@@ -42,6 +42,9 @@
 
 #define ICE_TX_MIN_PKT_LEN 17
 
+/* Max data buffer size must be 16K - 128 bytes */
+#define ICE_RX_MAX_DATA_BUF_SIZE	(16 * 1024 - 128)
+
 typedef void (*ice_rx_release_mbufs_t)(struct ice_rx_queue *rxq);
 typedef void (*ice_tx_release_mbufs_t)(struct ice_tx_queue *txq);
 typedef void (*ice_rxd_to_pkt_fields_t)(struct ice_rx_queue *rxq,
diff -Nru dpdk-20.11.8/drivers/net/ice/ice_rxtx_vec_common.h dpdk-20.11.9/drivers/net/ice/ice_rxtx_vec_common.h
--- dpdk-20.11.8/drivers/net/ice/ice_rxtx_vec_common.h	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/ice/ice_rxtx_vec_common.h	2023-08-15 16:54:57.000000000 +0100
@@ -72,7 +72,7 @@
 	/* save the partial packet for next time */
 	rxq->pkt_first_seg = start;
 	rxq->pkt_last_seg = end;
-	rte_memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
+	memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
 	return pkt_idx;
 }
 
diff -Nru dpdk-20.11.8/drivers/net/igc/igc_txrx.c dpdk-20.11.9/drivers/net/igc/igc_txrx.c
--- dpdk-20.11.8/drivers/net/igc/igc_txrx.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/igc/igc_txrx.c	2023-08-15 16:54:57.000000000 +0100
@@ -1308,6 +1308,7 @@
 			dvmolr |= IGC_DVMOLR_STRCRC;
 
 		IGC_WRITE_REG(hw, IGC_DVMOLR(rxq->reg_idx), dvmolr);
+		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
 	}
 
 	return 0;
@@ -1951,6 +1952,7 @@
 		if (txq != NULL) {
 			igc_tx_queue_release_mbufs(txq);
 			igc_reset_tx_queue(txq);
+			dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
 		}
 	}
 
@@ -1959,6 +1961,7 @@
 		if (rxq != NULL) {
 			igc_rx_queue_release_mbufs(rxq);
 			igc_reset_rx_queue(rxq);
+			dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
 		}
 	}
 }
@@ -2204,6 +2207,7 @@
 				IGC_TXDCTL_WTHRESH_MSK;
 		txdctl |= IGC_TXDCTL_QUEUE_ENABLE;
 		IGC_WRITE_REG(hw, IGC_TXDCTL(txq->reg_idx), txdctl);
+		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
 	}
 
 	igc_config_collision_dist(hw);
diff -Nru dpdk-20.11.8/drivers/net/ixgbe/ixgbe_rxtx.c dpdk-20.11.9/drivers/net/ixgbe/ixgbe_rxtx.c
--- dpdk-20.11.8/drivers/net/ixgbe/ixgbe_rxtx.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/ixgbe/ixgbe_rxtx.c	2023-08-15 16:54:57.000000000 +0100
@@ -1787,11 +1787,22 @@
 		 * of accesses cannot be reordered by the compiler. If they were
 		 * not volatile, they could be reordered which could lead to
 		 * using invalid descriptor fields when read from rxd.
+		 *
+		 * Meanwhile, to prevent the CPU from executing out of order, we
+		 * need to use a proper memory barrier to ensure the memory
+		 * ordering below.
 		 */
 		rxdp = &rx_ring[rx_id];
 		staterr = rxdp->wb.upper.status_error;
 		if (!(staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
 			break;
+
+		/*
+		 * Use acquire fence to ensure that status_error which includes
+		 * DD bit is loaded before loading of other descriptor words.
+		 */
+		rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+
 		rxd = *rxdp;
 
 		/*
@@ -2058,32 +2069,10 @@
 
 next_desc:
 		/*
-		 * The code in this whole file uses the volatile pointer to
-		 * ensure the read ordering of the status and the rest of the
-		 * descriptor fields (on the compiler level only!!!). This is so
-		 * UGLY - why not to just use the compiler barrier instead? DPDK
-		 * even has the rte_compiler_barrier() for that.
-		 *
-		 * But most importantly this is just wrong because this doesn't
-		 * ensure memory ordering in a general case at all. For
-		 * instance, DPDK is supposed to work on Power CPUs where
-		 * compiler barrier may just not be enough!
-		 *
-		 * I tried to write only this function properly to have a
-		 * starting point (as a part of an LRO/RSC series) but the
-		 * compiler cursed at me when I tried to cast away the
-		 * "volatile" from rx_ring (yes, it's volatile too!!!). So, I'm
-		 * keeping it the way it is for now.
-		 *
-		 * The code in this file is broken in so many other places and
-		 * will just not work on a big endian CPU anyway therefore the
-		 * lines below will have to be revisited together with the rest
-		 * of the ixgbe PMD.
-		 *
-		 * TODO:
-		 *    - Get rid of "volatile" and let the compiler do its job.
-		 *    - Use the proper memory barrier (rte_rmb()) to ensure the
-		 *      memory ordering below.
+		 * "Volatile" only prevents caching of the variable marked
+		 * volatile. More importantly, "volatile" cannot prevent the CPU
+		 * from executing out of order. So, it is necessary to use a
+		 * proper memory barrier to ensure the memory ordering below.
 		 */
 		rxdp = &rx_ring[rx_id];
 		staterr = rte_le_to_cpu_32(rxdp->wb.upper.status_error);
@@ -2091,6 +2080,12 @@
 		if (!(staterr & IXGBE_RXDADV_STAT_DD))
 			break;
 
+		/*
+		 * Use acquire fence to ensure that status_error which includes
+		 * DD bit is loaded before loading of other descriptor words.
+		 */
+		rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+
 		rxd = *rxdp;
 
 		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
@@ -3373,6 +3368,7 @@
 		if (txq != NULL) {
 			txq->ops->release_mbufs(txq);
 			txq->ops->reset(txq);
+			dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
 		}
 	}
 
@@ -3382,6 +3378,7 @@
 		if (rxq != NULL) {
 			ixgbe_rx_queue_release_mbufs(rxq);
 			ixgbe_reset_rx_queue(adapter, rxq);
+			dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
 		}
 	}
 	/* If loopback mode was enabled, reconfigure the link accordingly */
@@ -5821,6 +5818,8 @@
 		} while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
 		if (!poll_ms)
 			PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", i);
+		else
+			dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
 	}
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 
@@ -5838,6 +5837,8 @@
 		} while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
 		if (!poll_ms)
 			PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", i);
+		else
+			dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
 		rte_wmb();
 		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), rxq->nb_rx_desc - 1);
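
Stripped of the surrounding burst logic, the polling pattern the first two ixgbe_rxtx.c hunks above settle on is roughly the following; a sketch only, assuming the driver-internal descriptor type and flag already used in this file:

#include <rte_atomic.h>
#include <rte_byteorder.h>

/* Sketch: poll one descriptor; returns 1 and copies it out when ready. */
static int
poll_one_desc(volatile union ixgbe_adv_rx_desc *rxdp,
	      union ixgbe_adv_rx_desc *out)
{
	uint32_t staterr = rxdp->wb.upper.status_error;

	if (!(staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
		return 0;	/* descriptor not written back yet */

	/* Order the DD check before the other field loads; on weakly
	 * ordered CPUs "volatile" alone cannot guarantee this. */
	rte_atomic_thread_fence(__ATOMIC_ACQUIRE);

	*out = *rxdp;		/* now safe to consume the descriptor */
	return 1;
}
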
 
diff -Nru dpdk-20.11.8/drivers/net/mlx5/linux/mlx5_ethdev_os.c dpdk-20.11.9/drivers/net/mlx5/linux/mlx5_ethdev_os.c
--- dpdk-20.11.8/drivers/net/mlx5/linux/mlx5_ethdev_os.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/mlx5/linux/mlx5_ethdev_os.c	2023-08-15 16:54:57.000000000 +0100
@@ -715,6 +715,7 @@
 
 	for (i = 0; i < sh->max_port; ++i) {
 		struct rte_eth_dev *dev;
+		struct mlx5_priv *priv;
 
 		if (sh->port[i].ih_port_id >= RTE_MAX_ETHPORTS) {
 			/*
@@ -725,9 +726,14 @@
 		}
 		dev = &rte_eth_devices[sh->port[i].ih_port_id];
 		MLX5_ASSERT(dev);
-		if (dev->data->dev_conf.intr_conf.rmv)
+		priv = dev->data->dev_private;
+		MLX5_ASSERT(priv);
+		if (!priv->rmv_notified && dev->data->dev_conf.intr_conf.rmv) {
+			/* Notify driver about removal only once. */
+			priv->rmv_notified = 1;
 			rte_eth_dev_callback_process
 				(dev, RTE_ETH_EVENT_INTR_RMV, NULL);
+		}
 	}
 }
 
@@ -800,21 +806,29 @@
 		struct rte_eth_dev *dev;
 		uint32_t tmp;
 
-		if (mlx5_glue->get_async_event(sh->ctx, &event))
+		if (mlx5_glue->get_async_event(sh->ctx, &event)) {
+			if (errno == EIO) {
+				DRV_LOG(DEBUG,
+					"IBV async event queue closed on: %s",
+					sh->ibdev_name);
+				mlx5_dev_interrupt_device_fatal(sh);
+			}
 			break;
-		/* Retrieve and check IB port index. */
-		tmp = (uint32_t)event.element.port_num;
-		if (!tmp && event.event_type == IBV_EVENT_DEVICE_FATAL) {
+		}
+		if (event.event_type == IBV_EVENT_DEVICE_FATAL) {
 			/*
-			 * The DEVICE_FATAL event is called once for
-			 * entire device without port specifying.
-			 * We should notify all existing ports.
+			 * The DEVICE_FATAL event can be called by kernel
+			 * twice - from mlx5 and uverbs layers, and port
+			 * index is not applicable. We should notify all
+			 * existing ports.
 			 */
-			mlx5_glue->ack_async_event(&event);
 			mlx5_dev_interrupt_device_fatal(sh);
+			mlx5_glue->ack_async_event(&event);
 			continue;
 		}
-		MLX5_ASSERT(tmp && (tmp <= sh->max_port));
+		/* Retrieve and check IB port index. */
+		tmp = (uint32_t)event.element.port_num;
+		MLX5_ASSERT(tmp <= sh->max_port);
 		if (!tmp) {
 			/* Unsupported device level event. */
 			mlx5_glue->ack_async_event(&event);
diff -Nru dpdk-20.11.8/drivers/net/mlx5/linux/mlx5_os.c dpdk-20.11.9/drivers/net/mlx5/linux/mlx5_os.c
--- dpdk-20.11.8/drivers/net/mlx5/linux/mlx5_os.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/mlx5/linux/mlx5_os.c	2023-08-15 16:54:57.000000000 +0100
@@ -1202,6 +1202,7 @@
 	config->mprq.log_min_stride_wqe_size =
 			MLX5_MPRQ_LOG_MIN_STRIDE_WQE_SIZE;
 	config->mprq.log_stride_num = MLX5_MPRQ_DEFAULT_LOG_STRIDE_NUM;
+	config->mprq.log_stride_size = MLX5_MPRQ_DEFAULT_LOG_STRIDE_SIZE;
 	if (config->devx) {
 		config->mprq.log_min_stride_wqe_size =
 				config->hca_attr.log_min_stride_wqe_sz;
diff -Nru dpdk-20.11.8/drivers/net/mlx5/mlx5_flow.c dpdk-20.11.9/drivers/net/mlx5/mlx5_flow.c
--- dpdk-20.11.8/drivers/net/mlx5/mlx5_flow.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/mlx5/mlx5_flow.c	2023-08-15 16:54:57.000000000 +0100
@@ -7918,7 +7918,7 @@
 	if (!is_tunnel_offload_active(dev))
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
-					  "tunnel offload was not activated");
+					  "tunnel offload was not activated, consider setting dv_xmeta_en=3");
 	if (!tunnel)
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
diff -Nru dpdk-20.11.8/drivers/net/mlx5/mlx5_flow_dv.c dpdk-20.11.9/drivers/net/mlx5/mlx5_flow_dv.c
--- dpdk-20.11.8/drivers/net/mlx5/mlx5_flow_dv.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/mlx5/mlx5_flow_dv.c	2023-08-15 16:54:57.000000000 +0100
@@ -1499,6 +1499,8 @@
  *   Pointer to the rte_eth_dev structure.
  * @param[in] item
  *   Item specification.
+ * @param[in] tag_bitmap
+ *   Tag index bitmap.
  * @param[in] attr
  *   Attributes of flow that includes this item.
  * @param[out] error
@@ -1510,6 +1512,7 @@
 static int
 flow_dv_validate_item_tag(struct rte_eth_dev *dev,
 			  const struct rte_flow_item *item,
+			  uint32_t *tag_bitmap,
 			  const struct rte_flow_attr *attr __rte_unused,
 			  struct rte_flow_error *error)
 {
@@ -1553,6 +1556,12 @@
 	if (ret < 0)
 		return ret;
 	MLX5_ASSERT(ret != REG_NON);
+	if (*tag_bitmap & (1 << ret))
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+					  item->spec,
+					  "Duplicated tag index");
+	*tag_bitmap |= 1 << ret;
 	return 0;
 }
 
@@ -5313,7 +5322,8 @@
 		.std_tbl_fix = true,
 	};
 	const struct rte_eth_hairpin_conf *conf;
-	uint32_t tag_id = 0;
+	uint32_t tag_id = 0, tag_bitmap = 0;
+	const struct mlx5_rte_flow_item_tag *mlx5_tag;
 
 	if (items == NULL)
 		return -1;
@@ -5584,7 +5594,7 @@
 			last_item = MLX5_FLOW_LAYER_ICMP6;
 			break;
 		case RTE_FLOW_ITEM_TYPE_TAG:
-			ret = flow_dv_validate_item_tag(dev, items,
+			ret = flow_dv_validate_item_tag(dev, items, &tag_bitmap,
 							attr, error);
 			if (ret < 0)
 				return ret;
@@ -5594,6 +5604,13 @@
 			last_item = MLX5_FLOW_ITEM_TX_QUEUE;
 			break;
 		case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
+			mlx5_tag = (const struct mlx5_rte_flow_item_tag *)items->spec;
+			if (tag_bitmap & (1 << mlx5_tag->id))
+				return rte_flow_error_set(error, EINVAL,
+							  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+							  items->spec,
+							  "Duplicated tag index");
+			tag_bitmap |= 1 << mlx5_tag->id;
 			break;
 		case RTE_FLOW_ITEM_TYPE_GTP:
 			ret = flow_dv_validate_item_gtp(dev, items, item_flags,
diff -Nru dpdk-20.11.8/drivers/net/mlx5/mlx5.h dpdk-20.11.9/drivers/net/mlx5/mlx5.h
--- dpdk-20.11.8/drivers/net/mlx5/mlx5.h	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/mlx5/mlx5.h	2023-08-15 16:54:57.000000000 +0100
@@ -945,6 +945,7 @@
 	unsigned int mtr_reg_share:1; /* Whether support meter REG_C share. */
 	unsigned int sampler_en:1; /* Whether support sampler. */
 	unsigned int lb_used:1; /* Loopback queue is referred to. */
+	unsigned int rmv_notified:1; /* Notified about removal event */
 	uint32_t mark_enabled:1; /* If mark action is enabled on rxqs. */
 	uint16_t domain_id; /* Switch domain identifier. */
 	uint16_t vport_id; /* Associated VF vport index (if any). */
diff -Nru dpdk-20.11.8/drivers/net/mlx5/mlx5_rxq.c dpdk-20.11.9/drivers/net/mlx5/mlx5_rxq.c
--- dpdk-20.11.8/drivers/net/mlx5/mlx5_rxq.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/mlx5/mlx5_rxq.c	2023-08-15 16:54:57.000000000 +0100
@@ -579,12 +579,12 @@
 	 * synchronized, that might be broken on RQ restart
 	 * and cause Rx malfunction, so queue stopping is
 	 * not supported if vectorized Rx burst is engaged.
-	 * The routine pointer depends on the process
-	 * type, should perform check there.
+	 * The routine pointer depends on the process type,
+	 * so the check should be performed there. MPRQ is not supported either.
 	 */
-	if (pkt_burst == mlx5_rx_burst_vec) {
-		DRV_LOG(ERR, "Rx queue stop is not supported "
-			"for vectorized Rx");
+	if (pkt_burst != mlx5_rx_burst) {
+		DRV_LOG(ERR, "Rx queue stop is only supported "
+			"for non-vectorized single-packet Rx");
 		rte_errno = EINVAL;
 		return -EINVAL;
 	}
@@ -1446,23 +1446,38 @@
 	} else {
 		*actual_log_stride_num = config->mprq.log_stride_num;
 	}
-	if (config->mprq.log_stride_size) {
-		/* Checks if chosen size of stride is in supported range. */
-		if (config->mprq.log_stride_size > log_max_stride_size ||
-		    config->mprq.log_stride_size < log_min_stride_size) {
-			*actual_log_stride_size = log_def_stride_size;
+	/* Checks if chosen size of stride is in supported range. */
+	if (config->mprq.log_stride_size > log_max_stride_size ||
+	    config->mprq.log_stride_size < log_min_stride_size) {
+		*actual_log_stride_size = log_def_stride_size;
+		DRV_LOG(WARNING,
+			"Port %u Rx queue %u size of a stride for Multi-Packet RQ is out of range, setting default value (%u)",
+			dev->data->port_id, idx,
+			RTE_BIT32(log_def_stride_size));
+	} else {
+		*actual_log_stride_size = config->mprq.log_stride_size;
+	}
+	/* Make the stride fit the mbuf size by default. */
+	if (*actual_log_stride_size == MLX5_MPRQ_DEFAULT_LOG_STRIDE_SIZE) {
+		if (min_mbuf_size <= RTE_BIT32(log_max_stride_size)) {
 			DRV_LOG(WARNING,
-				"Port %u Rx queue %u size of a stride for Multi-Packet RQ is out of range, setting default value (%u)",
-				dev->data->port_id, idx,
-				RTE_BIT32(log_def_stride_size));
+				"Port %u Rx queue %u size of a stride for Multi-Packet RQ is adjusted to match the mbuf size (%u)",
+				dev->data->port_id, idx, min_mbuf_size);
+			*actual_log_stride_size = log2above(min_mbuf_size);
 		} else {
-			*actual_log_stride_size = config->mprq.log_stride_size;
+			goto unsupport;
 		}
-	} else {
-		if (min_mbuf_size <= RTE_BIT32(log_max_stride_size))
-			*actual_log_stride_size = log2above(min_mbuf_size);
-		else
+	}
+	/* Make sure the stride size is greater than the headroom. */
+	if (RTE_BIT32(*actual_log_stride_size) < RTE_PKTMBUF_HEADROOM) {
+		if (RTE_BIT32(log_max_stride_size) > RTE_PKTMBUF_HEADROOM) {
+			DRV_LOG(WARNING,
+				"Port %u Rx queue %u size of a stride for Multi-Packet RQ is adjusted to accommodate the headroom (%u)",
+				dev->data->port_id, idx, RTE_PKTMBUF_HEADROOM);
+			*actual_log_stride_size = log2above(RTE_PKTMBUF_HEADROOM);
+		} else {
 			goto unsupport;
+		}
 	}
 	log_stride_wqe_size = *actual_log_stride_num + *actual_log_stride_size;
 	/* Check if WQE buffer size is supported by hardware. */
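
In words, the reworked block above now always runs three adjustments on the configured stride size instead of only reacting to an explicit devarg. A hedged summary of that order, with variable names shortened and the out-of-range/unsupported fallbacks elided:

	/* 1. Out-of-range request: fall back to the default stride size. */
	if (log_stride_size > log_max_stride_size ||
	    log_stride_size < log_min_stride_size)
		log_stride_size = log_def_stride_size;
	/* 2. Still the "unset" default: grow it until a whole mbuf fits. */
	if (log_stride_size == MLX5_MPRQ_DEFAULT_LOG_STRIDE_SIZE &&
	    min_mbuf_size <= RTE_BIT32(log_max_stride_size))
		log_stride_size = log2above(min_mbuf_size);
	/* 3. Make sure one stride can also hold the mbuf headroom. */
	if (RTE_BIT32(log_stride_size) < RTE_PKTMBUF_HEADROOM)
		log_stride_size = log2above(RTE_PKTMBUF_HEADROOM);
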
diff -Nru dpdk-20.11.8/drivers/net/mlx5/mlx5_rxtx.c dpdk-20.11.9/drivers/net/mlx5/mlx5_rxtx.c
--- dpdk-20.11.8/drivers/net/mlx5/mlx5_rxtx.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/mlx5/mlx5_rxtx.c	2023-08-15 16:54:57.000000000 +0100
@@ -1620,6 +1620,7 @@
 	tcp->cksum = 0;
 	csum += rte_raw_cksum(tcp, (tcp->data_off >> 4) * 4);
 	csum = ((csum & 0xffff0000) >> 16) + (csum & 0xffff);
+	csum = ((csum & 0xffff0000) >> 16) + (csum & 0xffff);
 	csum = (~csum) & 0xffff;
 	if (csum == 0)
 		csum = 0xffff;
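
The duplicated fold line above is intentional rather than a stray copy: after adding 16-bit words into a 32-bit accumulator, the first fold of the high half into the low half can itself carry into bit 16, so the fold is applied twice before taking the ones' complement. A worked example with an invented accumulator value:

	uint32_t csum = 0x0002fffeu;	/* pretend sum of pseudo-header + TCP words */

	csum = ((csum & 0xffff0000) >> 16) + (csum & 0xffff);	/* 0x0002 + 0xfffe = 0x10000 */
	csum = ((csum & 0xffff0000) >> 16) + (csum & 0xffff);	/* 0x0001 + 0x0000 = 0x0001  */
	csum = (~csum) & 0xffff;				/* final value 0xfffe        */

With only a single fold the complement step would have produced 0xffff here, i.e. a corrupted checksum whenever the first fold carries.
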
diff -Nru dpdk-20.11.8/drivers/net/mlx5/mlx5_rxtx_vec_neon.h dpdk-20.11.9/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
--- dpdk-20.11.8/drivers/net/mlx5/mlx5_rxtx_vec_neon.h	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/mlx5/mlx5_rxtx_vec_neon.h	2023-08-15 16:54:57.000000000 +0100
@@ -647,6 +647,14 @@
 		c0 = vld1q_u64((uint64_t *)(p0 + 48));
 		/* Synchronize for loading the rest of blocks. */
 		rte_io_rmb();
+		/* B.0 (CQE 3) reload lower half of the block. */
+		c3 = vld1q_lane_u64((uint64_t *)(p3 + 48), c3, 0);
+		/* B.0 (CQE 2) reload lower half of the block. */
+		c2 = vld1q_lane_u64((uint64_t *)(p2 + 48), c2, 0);
+		/* B.0 (CQE 1) reload lower half of the block. */
+		c1 = vld1q_lane_u64((uint64_t *)(p1 + 48), c1, 0);
+		/* B.0 (CQE 0) reload lower half of the block. */
+		c0 = vld1q_lane_u64((uint64_t *)(p0 + 48), c0, 0);
 		/* Prefetch next 4 CQEs. */
 		if (pkts_n - pos >= 2 * MLX5_VPMD_DESCS_PER_LOOP) {
 			unsigned int next = pos + MLX5_VPMD_DESCS_PER_LOOP;
diff -Nru dpdk-20.11.8/drivers/net/netvsc/hn_rndis.c dpdk-20.11.9/drivers/net/netvsc/hn_rndis.c
--- dpdk-20.11.8/drivers/net/netvsc/hn_rndis.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/netvsc/hn_rndis.c	2023-08-15 16:54:57.000000000 +0100
@@ -329,7 +329,8 @@
 
 	hn_rndis_dump(data);
 
-	if (len < sizeof(3 * sizeof(uint32_t))) {
+	/* Check we can read first three data fields from RNDIS header */
+	if (len < 3 * sizeof(uint32_t)) {
 		PMD_DRV_LOG(ERR,
 			    "missing RNDIS header %u", len);
 		return;
diff -Nru dpdk-20.11.8/drivers/net/nfp/nfpcore/nfp_cppcore.c dpdk-20.11.9/drivers/net/nfp/nfpcore/nfp_cppcore.c
--- dpdk-20.11.8/drivers/net/nfp/nfpcore/nfp_cppcore.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/nfp/nfpcore/nfp_cppcore.c	2023-08-15 16:54:57.000000000 +0100
@@ -11,6 +11,7 @@
 #include <errno.h>
 #include <sys/types.h>
 
+#include <rte_bitops.h>
 #include <rte_byteorder.h>
 #include <rte_ethdev_pci.h>
 
@@ -118,6 +119,36 @@
 	return cpp_area->name;
 }
 
+#define NFP_IMB_TGTADDRESSMODECFG_MODE_of(_x)       (((_x) >> 13) & 0x7)
+#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE          RTE_BIT32(12)
+
+static int
+nfp_cpp_set_mu_locality_lsb(struct nfp_cpp *cpp)
+{
+	int ret;
+	int mode;
+	int addr40;
+	uint32_t imbcppat;
+
+	imbcppat = cpp->imb_cat_table[NFP_CPP_TARGET_MU];
+	mode = NFP_IMB_TGTADDRESSMODECFG_MODE_of(imbcppat);
+	addr40 = imbcppat & NFP_IMB_TGTADDRESSMODECFG_ADDRMODE;
+
+	ret = nfp_cppat_mu_locality_lsb(mode, addr40);
+	if (ret < 0)
+		return ret;
+
+	cpp->mu_locality_lsb = ret;
+
+	return 0;
+}
+
+uint32_t
+nfp_cpp_mu_locality_lsb(struct nfp_cpp *cpp)
+{
+	return cpp->mu_locality_lsb;
+}
+
 /*
  * nfp_cpp_area_alloc - allocate a new CPP area
  * @cpp:    CPP handle
@@ -142,10 +173,6 @@
 	if (!cpp)
 		return NULL;
 
-	/* CPP bus uses only a 40-bit address */
-	if ((address + size) > (1ULL << 40))
-		return NFP_ERRPTR(EFAULT);
-
 	/* Remap from cpp_island to cpp_target */
 	err = nfp_target_cpp(dest, tmp64, &dest, &tmp64, cpp->imb_cat_table);
 	if (err < 0)
@@ -588,6 +615,13 @@
 		}
 	}
 
+	err = nfp_cpp_set_mu_locality_lsb(cpp);
+	if (err < 0) {
+		printf("Can't calculate MU locality bit offset");
+		free(cpp);
+		return NULL;
+	}
+
 	return cpp;
 }
 
@@ -819,8 +853,7 @@
 /*
  * nfp_cpp_map_area() - Helper function to map an area
  * @cpp:    NFP CPP handler
- * @domain: CPP domain
- * @target: CPP target
+ * @cpp_id: CPP ID
  * @addr:   CPP address
  * @size:   Size of the area
  * @area:   Area handle (output)
@@ -831,15 +864,12 @@
  * Return: Pointer to memory mapped area or ERR_PTR
  */
 uint8_t *
-nfp_cpp_map_area(struct nfp_cpp *cpp, int domain, int target, uint64_t addr,
+nfp_cpp_map_area(struct nfp_cpp *cpp, uint32_t cpp_id, uint64_t addr,
 		 unsigned long size, struct nfp_cpp_area **area)
 {
 	uint8_t *res;
-	uint32_t dest;
-
-	dest = NFP_CPP_ISLAND_ID(target, NFP_CPP_ACTION_RW, 0, domain);
 
-	*area = nfp_cpp_area_alloc_acquire(cpp, dest, addr, size);
+	*area = nfp_cpp_area_alloc_acquire(cpp, cpp_id, addr, size);
 	if (!*area)
 		goto err_eio;
 
diff -Nru dpdk-20.11.8/drivers/net/nfp/nfpcore/nfp_cpp.h dpdk-20.11.9/drivers/net/nfp/nfpcore/nfp_cpp.h
--- dpdk-20.11.8/drivers/net/nfp/nfpcore/nfp_cpp.h	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/nfp/nfpcore/nfp_cpp.h	2023-08-15 16:54:57.000000000 +0100
@@ -34,6 +34,9 @@
 	 */
 	uint32_t imb_cat_table[16];
 
+	/* MU access type bit offset */
+	uint32_t mu_locality_lsb;
+
 	int driver_lock_needed;
 };
 
@@ -363,7 +366,7 @@
  */
 void nfp_cpp_area_release_free(struct nfp_cpp_area *area);
 
-uint8_t *nfp_cpp_map_area(struct nfp_cpp *cpp, int domain, int target,
+uint8_t *nfp_cpp_map_area(struct nfp_cpp *cpp, uint32_t cpp_id,
 			   uint64_t addr, unsigned long size,
 			   struct nfp_cpp_area **area);
 /*
@@ -778,4 +781,6 @@
  */
 int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex);
 
+uint32_t nfp_cpp_mu_locality_lsb(struct nfp_cpp *cpp);
+
 #endif /* !__NFP_CPP_H__ */
diff -Nru dpdk-20.11.8/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c dpdk-20.11.9/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c
--- dpdk-20.11.8/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c	2023-08-15 16:54:57.000000000 +0100
@@ -66,8 +66,8 @@
 #define NFP_PCIE_P2C_GENERAL_TOKEN_OFFSET(bar, x) ((x) << ((bar)->bitsize - 4))
 #define NFP_PCIE_P2C_GENERAL_SIZE(bar)             (1 << ((bar)->bitsize - 4))
 
-#define NFP_PCIE_CFG_BAR_PCIETOCPPEXPBAR(bar, slot) \
-	(NFP_PCIE_BAR(0) + ((bar) * 8 + (slot)) * 4)
+#define NFP_PCIE_CFG_BAR_PCIETOCPPEXPBAR(id, bar, slot) \
+	(NFP_PCIE_BAR(id) + ((bar) * 8 + (slot)) * 4)
 
 #define NFP_PCIE_CPP_BAR_PCIETOCPPEXPBAR(bar, slot) \
 	(((bar) * 8 + (slot)) * 4)
@@ -114,6 +114,7 @@
 	int secondary_lock;
 	char busdev[BUSDEV_SZ];
 	int barsz;
+	int dev_id;
 	char *cfg;
 };
 
@@ -255,7 +256,7 @@
 		return (-ENOMEM);
 
 	bar->csr = nfp->cfg +
-		   NFP_PCIE_CFG_BAR_PCIETOCPPEXPBAR(base, slot);
+		   NFP_PCIE_CFG_BAR_PCIETOCPPEXPBAR(nfp->dev_id, base, slot);
 
 	*(uint32_t *)(bar->csr) = newcfg;
 
@@ -325,10 +326,8 @@
 		bar->base = 0;
 		bar->iomem = NULL;
 		bar->lock = 0;
-		bar->csr = nfp->cfg +
-			   NFP_PCIE_CFG_BAR_PCIETOCPPEXPBAR(bar->index >> 3,
-							   bar->index & 7);
-
+		bar->csr = nfp->cfg + NFP_PCIE_CFG_BAR_PCIETOCPPEXPBAR(nfp->dev_id,
+				bar->index >> 3, bar->index & 7);
 		bar->iomem = nfp->cfg + (bar->index << bar->bitsize);
 	}
 	return 0;
@@ -843,6 +842,7 @@
 		goto error;
 
 	desc->cfg = (char *)dev->mem_resource[0].addr;
+	desc->dev_id = dev->addr.function & 0x7;
 
 	nfp_enable_bars(desc);
 
diff -Nru dpdk-20.11.8/drivers/net/nfp/nfpcore/nfp_rtsym.c dpdk-20.11.9/drivers/net/nfp/nfpcore/nfp_rtsym.c
--- dpdk-20.11.8/drivers/net/nfp/nfpcore/nfp_rtsym.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/nfp/nfpcore/nfp_rtsym.c	2023-08-15 16:54:57.000000000 +0100
@@ -232,6 +232,113 @@
 	return NULL;
 }
 
+static uint64_t
+nfp_rtsym_size(const struct nfp_rtsym *sym)
+{
+	switch (sym->type) {
+	case NFP_RTSYM_TYPE_NONE:
+		printf("rtsym '%s': type NONE", sym->name);
+		return 0;
+	case NFP_RTSYM_TYPE_OBJECT:    /* Fall through */
+	case NFP_RTSYM_TYPE_FUNCTION:
+		return sym->size;
+	case NFP_RTSYM_TYPE_ABS:
+		return sizeof(uint64_t);
+	default:
+		printf("rtsym '%s': unknown type: %d", sym->name, sym->type);
+		return 0;
+	}
+}
+
+static int
+nfp_rtsym_to_dest(struct nfp_cpp *cpp,
+		const struct nfp_rtsym *sym,
+		uint8_t action,
+		uint8_t token,
+		uint64_t offset,
+		uint32_t *cpp_id,
+		uint64_t *addr)
+{
+	if (sym->type != NFP_RTSYM_TYPE_OBJECT) {
+		printf("rtsym '%s': direct access to non-object rtsym",
+				sym->name);
+		return -EINVAL;
+	}
+
+	*addr = sym->addr + offset;
+
+	if (sym->target >= 0) {
+		*cpp_id = NFP_CPP_ISLAND_ID(sym->target, action, token, sym->domain);
+	} else if (sym->target == NFP_RTSYM_TARGET_EMU_CACHE) {
+		int locality_off = nfp_cpp_mu_locality_lsb(cpp);
+
+		*addr &= ~(NFP_MU_ADDR_ACCESS_TYPE_MASK << locality_off);
+		*addr |= NFP_MU_ADDR_ACCESS_TYPE_DIRECT << locality_off;
+
+		*cpp_id = NFP_CPP_ISLAND_ID(NFP_CPP_TARGET_MU, action, token,
+				sym->domain);
+	} else {
+		printf("rtsym '%s': unhandled target encoding: %d",
+				sym->name, sym->target);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+nfp_rtsym_readl(struct nfp_cpp *cpp,
+		const struct nfp_rtsym *sym,
+		uint8_t action,
+		uint8_t token,
+		uint64_t offset,
+		uint32_t *value)
+{
+	int ret;
+	uint64_t addr;
+	uint32_t cpp_id;
+
+	if (offset + 4 > nfp_rtsym_size(sym)) {
+		printf("rtsym '%s': readl out of bounds", sym->name);
+		return -ENXIO;
+	}
+
+	ret = nfp_rtsym_to_dest(cpp, sym, action, token, offset, &cpp_id, &addr);
+	if (ret != 0)
+		return ret;
+
+	return nfp_cpp_readl(cpp, cpp_id, addr, value);
+}
+
+static int
+nfp_rtsym_readq(struct nfp_cpp *cpp,
+		const struct nfp_rtsym *sym,
+		uint8_t action,
+		uint8_t token,
+		uint64_t offset,
+		uint64_t *value)
+{
+	int ret;
+	uint64_t addr;
+	uint32_t cpp_id;
+
+	if (offset + 8 > nfp_rtsym_size(sym)) {
+		printf("rtsym '%s': readq out of bounds", sym->name);
+		return -ENXIO;
+	}
+
+	if (sym->type == NFP_RTSYM_TYPE_ABS) {
+		*value = sym->addr;
+		return 0;
+	}
+
+	ret = nfp_rtsym_to_dest(cpp, sym, action, token, offset, &cpp_id, &addr);
+	if (ret != 0)
+		return ret;
+
+	return nfp_cpp_readq(cpp, cpp_id, addr, value);
+}
+
 /*
  * nfp_rtsym_read_le() - Read a simple unsigned scalar value from symbol
  * @rtbl:	NFP RTsym table
@@ -248,7 +355,7 @@
 nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name, int *error)
 {
 	const struct nfp_rtsym *sym;
-	uint32_t val32, id;
+	uint32_t val32;
 	uint64_t val;
 	int err;
 
@@ -258,19 +365,13 @@
 		goto exit;
 	}
 
-	id = NFP_CPP_ISLAND_ID(sym->target, NFP_CPP_ACTION_RW, 0, sym->domain);
-
-#ifdef DEBUG
-	printf("Reading symbol %s with size %" PRIu64 " at %" PRIx64 "\n",
-		name, sym->size, sym->addr);
-#endif
 	switch (sym->size) {
 	case 4:
-		err = nfp_cpp_readl(rtbl->cpp, id, sym->addr, &val32);
+		err = nfp_rtsym_readl(rtbl->cpp, sym, NFP_CPP_ACTION_RW, 0, 0, &val32);
 		val = val32;
 		break;
 	case 8:
-		err = nfp_cpp_readq(rtbl->cpp, id, sym->addr, &val);
+		err = nfp_rtsym_readq(rtbl->cpp, sym, NFP_CPP_ACTION_RW, 0, 0, &val);
 		break;
 	default:
 		printf("rtsym '%s' unsupported size: %" PRId64 "\n",
@@ -295,8 +396,11 @@
 nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name,
 	      unsigned int min_size, struct nfp_cpp_area **area)
 {
-	const struct nfp_rtsym *sym;
+	int ret;
 	uint8_t *mem;
+	uint64_t addr;
+	uint32_t cpp_id;
+	const struct nfp_rtsym *sym;
 
 #ifdef DEBUG
 	printf("mapping symbol %s\n", name);
@@ -307,14 +411,20 @@
 		return NULL;
 	}
 
+	ret = nfp_rtsym_to_dest(rtbl->cpp, sym, NFP_CPP_ACTION_RW, 0, 0,
+			&cpp_id, &addr);
+	if (ret != 0) {
+		printf("rtsym '%s': mapping failed", name);
+		return NULL;
+	}
+
 	if (sym->size < min_size) {
 		printf("Symbol %s too small (%" PRIu64 " < %u)\n", name,
 			sym->size, min_size);
 		return NULL;
 	}
 
-	mem = nfp_cpp_map_area(rtbl->cpp, sym->domain, sym->target, sym->addr,
-			       sym->size, area);
+	mem = nfp_cpp_map_area(rtbl->cpp, cpp_id, addr, sym->size, area);
 	if (!mem) {
 		printf("Failed to map symbol %s\n", name);
 		return NULL;
diff -Nru dpdk-20.11.8/drivers/net/nfp/nfp_net.c dpdk-20.11.9/drivers/net/nfp/nfp_net.c
--- dpdk-20.11.8/drivers/net/nfp/nfp_net.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/nfp/nfp_net.c	2023-08-15 16:54:57.000000000 +0100
@@ -2238,10 +2238,14 @@
 static inline
 uint32_t nfp_free_tx_desc(struct nfp_net_txq *txq)
 {
+	uint32_t free_desc;
+
 	if (txq->wr_p >= txq->rd_p)
-		return txq->tx_count - (txq->wr_p - txq->rd_p) - 8;
+		free_desc = txq->tx_count - (txq->wr_p - txq->rd_p);
 	else
-		return txq->rd_p - txq->wr_p - 8;
+		free_desc = txq->rd_p - txq->wr_p;
+
+	return (free_desc > 8) ? (free_desc - 8) : 0;
 }
 
 /*
@@ -2797,6 +2801,7 @@
 
 	uint64_t tx_bar_off = 0, rx_bar_off = 0;
 	uint32_t start_q;
+	uint32_t cpp_id;
 	int stride = 4;
 	int port = 0;
 	int err;
@@ -2908,7 +2913,8 @@
 
 	if (hw->is_pf && port == 0) {
 		/* configure access to tx/rx vNIC BARs */
-		hwport0->hw_queues = nfp_cpp_map_area(hw->cpp, 0, 0,
+		cpp_id = NFP_CPP_ISLAND_ID(0, NFP_CPP_ACTION_RW, 0, 0);
+		hwport0->hw_queues = nfp_cpp_map_area(hw->cpp, cpp_id,
 						      NFP_PCIE_QUEUE(0),
 						      NFP_QCP_QUEUE_AREA_SZ,
 						      &hw->hwqueues_area);
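
The nfp_free_tx_desc() change earlier in this file guards against an unsigned underflow when fewer than 8 descriptors are free; a worked example with made-up ring indices shows the difference:

	uint32_t tx_count = 64, wr_p = 10, rd_p = 12;	/* invented values */
	uint32_t free_desc = (wr_p >= rd_p) ?
			tx_count - (wr_p - rd_p) : rd_p - wr_p;		/* = 2 */

	uint32_t old_result = free_desc - 8;			/* wraps to 4294967290 */
	uint32_t new_result = (free_desc > 8) ? (free_desc - 8) : 0;	/* = 0 */
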
diff -Nru dpdk-20.11.8/drivers/net/qede/qede_ethdev.c dpdk-20.11.9/drivers/net/qede/qede_ethdev.c
--- dpdk-20.11.8/drivers/net/qede/qede_ethdev.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/qede/qede_ethdev.c	2023-08-15 16:54:57.000000000 +0100
@@ -2148,6 +2148,7 @@
 		rss_params.rss_enable = 1;
 	}
 
+	rss_params.update_rss_ind_table = 1;
 	rss_params.update_rss_config = 1;
 	/* tbl_size has to be set with capabilities */
 	rss_params.rss_table_size_log = 7;
diff -Nru dpdk-20.11.8/drivers/net/tap/rte_eth_tap.c dpdk-20.11.9/drivers/net/tap/rte_eth_tap.c
--- dpdk-20.11.8/drivers/net/tap/rte_eth_tap.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/tap/rte_eth_tap.c	2023-08-15 16:54:57.000000000 +0100
@@ -2292,8 +2292,8 @@
 	if (!strncasecmp(ETH_TAP_MAC_FIXED, value, strlen(ETH_TAP_MAC_FIXED))) {
 		static int iface_idx;
 
-		/* fixed mac = 00:64:74:61:70:<iface_idx> */
-		memcpy((char *)user_mac->addr_bytes, "\0dtap",
+		/* fixed mac = 02:64:74:61:70:<iface_idx> */
+		memcpy((char *)user_mac->addr_bytes, "\002dtap",
 			RTE_ETHER_ADDR_LEN);
 		user_mac->addr_bytes[RTE_ETHER_ADDR_LEN - 1] =
 			iface_idx++ + '0';
diff -Nru dpdk-20.11.8/drivers/net/txgbe/base/txgbe_hw.c dpdk-20.11.9/drivers/net/txgbe/base/txgbe_hw.c
--- dpdk-20.11.8/drivers/net/txgbe/base/txgbe_hw.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/txgbe/base/txgbe_hw.c	2023-08-15 16:54:57.000000000 +0100
@@ -2321,10 +2321,24 @@
 	}
 
 	if (speed & TXGBE_LINK_SPEED_1GB_FULL) {
+		u32 curr_autoneg;
+
 		speedcnt++;
 		if (highest_link_speed == TXGBE_LINK_SPEED_UNKNOWN)
 			highest_link_speed = TXGBE_LINK_SPEED_1GB_FULL;
 
+		status = hw->mac.check_link(hw, &link_speed, &link_up, false);
+		if (status != 0)
+			return status;
+
+		/* If we already have link at this speed, just jump out */
+		if (link_speed == TXGBE_LINK_SPEED_1GB_FULL) {
+			curr_autoneg = rd32_epcs(hw, SR_MII_MMD_CTL);
+			if (link_up && (hw->autoneg ==
+					!!(curr_autoneg & SR_MII_MMD_CTL_AN_EN)))
+				goto out;
+		}
+
 		/* Set the module link speed */
 		switch (hw->phy.media_type) {
 		case txgbe_media_type_fiber:
diff -Nru dpdk-20.11.8/drivers/net/txgbe/base/txgbe_phy.c dpdk-20.11.9/drivers/net/txgbe/base/txgbe_phy.c
--- dpdk-20.11.8/drivers/net/txgbe/base/txgbe_phy.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/txgbe/base/txgbe_phy.c	2023-08-15 16:54:57.000000000 +0100
@@ -1347,7 +1347,9 @@
 	wr32_epcs(hw, SR_MII_MMD_AN_CTL, 0x0105);
 	wr32_epcs(hw, SR_MII_MMD_DIGI_CTL, 0x0200);
 	value = rd32_epcs(hw, SR_MII_MMD_CTL);
-	value = (value & ~0x1200) | (0x1 << 12) | (0x1 << 9);
+	value = (value & ~0x1200) | (0x1 << 9);
+	if (hw->autoneg)
+		value |= SR_MII_MMD_CTL_AN_EN;
 	wr32_epcs(hw, SR_MII_MMD_CTL, value);
 	return 0;
 }
@@ -1454,8 +1456,9 @@
 		goto out;
 	}
 
-	wr32m(hw, TXGBE_MACTXCFG, TXGBE_MACTXCFG_TXE,
-			~TXGBE_MACTXCFG_TXE);
+	wr32m(hw, TXGBE_MACTXCFG, TXGBE_MACTXCFG_TXE, ~TXGBE_MACTXCFG_TXE);
+	wr32m(hw, TXGBE_MACRXCFG, TXGBE_MACRXCFG_ENA, ~TXGBE_MACRXCFG_ENA);
+	hw->mac.disable_sec_tx_path(hw);
 
 	/* 2. Disable xpcs AN-73 */
 	if (!autoneg)
@@ -1654,8 +1657,9 @@
 		goto out;
 	}
 
-	wr32m(hw, TXGBE_MACTXCFG, TXGBE_MACTXCFG_TXE,
-				~TXGBE_MACTXCFG_TXE);
+	wr32m(hw, TXGBE_MACTXCFG, TXGBE_MACTXCFG_TXE, ~TXGBE_MACTXCFG_TXE);
+	wr32m(hw, TXGBE_MACRXCFG, TXGBE_MACRXCFG_ENA, ~TXGBE_MACRXCFG_ENA);
+	hw->mac.disable_sec_tx_path(hw);
 
 	/* 2. Disable xpcs AN-73 */
 	if (!autoneg)
@@ -1849,8 +1853,9 @@
 		goto out;
 	}
 
-	wr32m(hw, TXGBE_MACTXCFG, TXGBE_MACTXCFG_TXE,
-			~TXGBE_MACTXCFG_TXE);
+	wr32m(hw, TXGBE_MACTXCFG, TXGBE_MACTXCFG_TXE, ~TXGBE_MACTXCFG_TXE);
+	wr32m(hw, TXGBE_MACRXCFG, TXGBE_MACRXCFG_ENA, ~TXGBE_MACRXCFG_ENA);
+	hw->mac.disable_sec_tx_path(hw);
 
 	/* 2. Disable xpcs AN-73 */
 	wr32_epcs(hw, SR_AN_CTRL, 0x0);
@@ -2189,6 +2194,8 @@
 		txgbe_set_link_to_sfi(hw, speed);
 	}
 
+	hw->mac.enable_sec_tx_path(hw);
+
 	if (speed == TXGBE_LINK_SPEED_10GB_FULL)
 		mactxcfg = TXGBE_MACTXCFG_SPEED_10G;
 	else if (speed == TXGBE_LINK_SPEED_1GB_FULL)
@@ -2196,5 +2203,6 @@
 
 	/* enable mac transmitter */
 	wr32m(hw, TXGBE_MACTXCFG, TXGBE_MACTXCFG_SPEED_MASK, mactxcfg);
+	wr32m(hw, TXGBE_MACRXCFG, TXGBE_MACRXCFG_ENA, TXGBE_MACRXCFG_ENA);
 }
 
diff -Nru dpdk-20.11.8/drivers/net/txgbe/base/txgbe_type.h dpdk-20.11.9/drivers/net/txgbe/base/txgbe_type.h
--- dpdk-20.11.8/drivers/net/txgbe/base/txgbe_type.h	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/txgbe/base/txgbe_type.h	2023-08-15 16:54:57.000000000 +0100
@@ -670,6 +670,7 @@
 	bool adapter_stopped;
 	bool allow_unsupported_sfp;
 	bool need_crosstalk_fix;
+	bool autoneg;
 
 	uint64_t isb_dma;
 	void IOMEM *isb_mem;
diff -Nru dpdk-20.11.8/drivers/net/txgbe/txgbe_ethdev.c dpdk-20.11.9/drivers/net/txgbe/txgbe_ethdev.c
--- dpdk-20.11.8/drivers/net/txgbe/txgbe_ethdev.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/txgbe/txgbe_ethdev.c	2023-08-15 16:54:57.000000000 +0100
@@ -169,7 +169,9 @@
 	HW_XSTAT(tx_total_packets),
 	HW_XSTAT(rx_total_missed_packets),
 	HW_XSTAT(rx_broadcast_packets),
+	HW_XSTAT(tx_broadcast_packets),
 	HW_XSTAT(rx_multicast_packets),
+	HW_XSTAT(tx_multicast_packets),
 	HW_XSTAT(rx_management_packets),
 	HW_XSTAT(tx_management_packets),
 	HW_XSTAT(rx_management_dropped),
@@ -1571,6 +1573,7 @@
 		speed = (TXGBE_LINK_SPEED_100M_FULL |
 			 TXGBE_LINK_SPEED_1GB_FULL |
 			 TXGBE_LINK_SPEED_10GB_FULL);
+		hw->autoneg = true;
 	} else {
 		if (*link_speeds & ETH_LINK_SPEED_10G)
 			speed |= TXGBE_LINK_SPEED_10GB_FULL;
@@ -1582,6 +1585,7 @@
 			speed |= TXGBE_LINK_SPEED_1GB_FULL;
 		if (*link_speeds & ETH_LINK_SPEED_100M)
 			speed |= TXGBE_LINK_SPEED_100M_FULL;
+		hw->autoneg = false;
 	}
 
 	err = hw->mac.setup_link(hw, speed, link_up);
@@ -1792,8 +1796,9 @@
 		rte_delay_ms(100);
 	} while (retries++ < (10 + TXGBE_LINK_UP_TIME));
 
-	/* cancel the delay handler before remove dev */
+	/* cancel all alarm handlers before removing the dev */
 	rte_eal_alarm_cancel(txgbe_dev_interrupt_delayed_handler, dev);
+	rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);
 
 	/* uninitialize PF if max_vfs not zero */
 	txgbe_pf_host_uninit(dev);
diff -Nru dpdk-20.11.8/drivers/net/virtio/virtio_ethdev.c dpdk-20.11.9/drivers/net/virtio/virtio_ethdev.c
--- dpdk-20.11.8/drivers/net/virtio/virtio_ethdev.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/virtio/virtio_ethdev.c	2023-08-15 16:54:57.000000000 +0100
@@ -1695,15 +1695,17 @@
 virtio_configure_intr(struct rte_eth_dev *dev)
 {
 	struct virtio_hw *hw = dev->data->dev_private;
+	int ret;
 
 	if (!rte_intr_cap_multiple(dev->intr_handle)) {
 		PMD_INIT_LOG(ERR, "Multiple intr vector not supported");
 		return -ENOTSUP;
 	}
 
-	if (rte_intr_efd_enable(dev->intr_handle, dev->data->nb_rx_queues)) {
+	ret = rte_intr_efd_enable(dev->intr_handle, dev->data->nb_rx_queues);
+	if (ret < 0) {
 		PMD_INIT_LOG(ERR, "Fail to create eventfd");
-		return -1;
+		return ret;
 	}
 
 	if (!dev->intr_handle->intr_vec) {
@@ -1735,12 +1737,13 @@
 	 */
 	if (virtio_intr_enable(dev) < 0) {
 		PMD_DRV_LOG(ERR, "interrupt enable failed");
-		return -1;
+		return -EINVAL;
 	}
 
-	if (virtio_queues_bind_intr(dev) < 0) {
+	ret = virtio_queues_bind_intr(dev);
+	if (ret < 0) {
 		PMD_INIT_LOG(ERR, "Failed to bind queue/interrupt");
-		return -1;
+		return ret;
 	}
 
 	return 0;
@@ -1796,7 +1799,7 @@
 	/* Tell the host we've known how to drive the device. */
 	vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
 	if (virtio_negotiate_features(hw, req_features) < 0)
-		return -1;
+		return -EINVAL;
 
 	hw->weak_barriers = !vtpci_with_feature(hw, VIRTIO_F_ORDER_PLATFORM);
 
@@ -1881,7 +1884,7 @@
 			if (config->mtu < RTE_ETHER_MIN_MTU) {
 				PMD_INIT_LOG(ERR, "invalid max MTU value (%u)",
 						config->mtu);
-				return -1;
+				return -EINVAL;
 			}
 
 			hw->max_mtu = config->mtu;
@@ -1913,10 +1916,11 @@
 		return ret;
 
 	if (eth_dev->data->dev_conf.intr_conf.rxq) {
-		if (virtio_configure_intr(eth_dev) < 0) {
+		ret = virtio_configure_intr(eth_dev);
+		if (ret < 0) {
 			PMD_INIT_LOG(ERR, "failed to configure interrupt");
 			virtio_free_queues(hw);
-			return -1;
+			return ret;
 		}
 	}
 
diff -Nru dpdk-20.11.8/drivers/net/virtio/virtio_user/virtio_user_dev.c dpdk-20.11.9/drivers/net/virtio/virtio_user/virtio_user_dev.c
--- dpdk-20.11.8/drivers/net/virtio/virtio_user/virtio_user_dev.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/virtio/virtio_user/virtio_user_dev.c	2023-08-15 16:54:57.000000000 +0100
@@ -475,10 +475,7 @@
 
 	parse_mac(dev, mac);
 
-	if (*ifname) {
-		dev->ifname = *ifname;
-		*ifname = NULL;
-	}
+	dev->ifname = *ifname;
 
 	if (virtio_user_dev_setup(dev) < 0) {
 		PMD_INIT_LOG(ERR, "backend set up fails");
@@ -592,6 +589,7 @@
 		}
 	}
 
+	*ifname = NULL;
 	return 0;
 }
 
diff -Nru dpdk-20.11.8/drivers/net/vmxnet3/vmxnet3_rxtx.c dpdk-20.11.9/drivers/net/vmxnet3/vmxnet3_rxtx.c
--- dpdk-20.11.8/drivers/net/vmxnet3/vmxnet3_rxtx.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/drivers/net/vmxnet3/vmxnet3_rxtx.c	2023-08-15 16:54:57.000000000 +0100
@@ -413,8 +413,8 @@
 
 	nb_tx = 0;
 	while (nb_tx < nb_pkts) {
-		Vmxnet3_GenericDesc *gdesc;
-		vmxnet3_buf_info_t *tbi;
+		Vmxnet3_GenericDesc *gdesc = NULL;
+		vmxnet3_buf_info_t *tbi = NULL;
 		uint32_t first2fill, avail, dw2;
 		struct rte_mbuf *txm = tx_pkts[nb_tx];
 		struct rte_mbuf *m_seg = txm;
@@ -458,18 +458,18 @@
 			continue;
 		}
 
+		/* Skip empty packets */
+		if (unlikely(rte_pktmbuf_pkt_len(txm) == 0)) {
+			txq->stats.drop_total++;
+			rte_pktmbuf_free(txm);
+			nb_tx++;
+			continue;
+		}
+
 		if (txm->nb_segs == 1 &&
 		    rte_pktmbuf_pkt_len(txm) <= txq->txdata_desc_size) {
 			struct Vmxnet3_TxDataDesc *tdd;
 
-			/* Skip empty packets */
-			if (unlikely(rte_pktmbuf_pkt_len(txm) == 0)) {
-				txq->stats.drop_total++;
-				rte_pktmbuf_free(txm);
-				nb_tx++;
-				continue;
-			}
-
 			tdd = (struct Vmxnet3_TxDataDesc *)
 				((uint8 *)txq->data_ring.base +
 				 txq->cmd_ring.next2fill *
@@ -482,6 +482,10 @@
 		dw2 = (txq->cmd_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
 		first2fill = txq->cmd_ring.next2fill;
 		do {
+			/* Skip empty segments */
+			if (unlikely(m_seg->data_len == 0))
+				continue;
+
 			/* Remember the transmit buffer for cleanup */
 			tbi = txq->cmd_ring.buf_info + txq->cmd_ring.next2fill;
 
@@ -491,10 +495,6 @@
 			 */
 			gdesc = txq->cmd_ring.base + txq->cmd_ring.next2fill;
 
-			/* Skip empty segments */
-			if (unlikely(m_seg->data_len == 0))
-				continue;
-
 			if (copy_size) {
 				uint64 offset =
 					(uint64)txq->cmd_ring.next2fill *
@@ -515,6 +515,11 @@
 			/* use the right gen for non-SOP desc */
 			dw2 = txq->cmd_ring.gen << VMXNET3_TXD_GEN_SHIFT;
 		} while ((m_seg = m_seg->next) != NULL);
+		/* We must have executed the complete preceding loop at least
+		 * once without skipping an empty segment, as we can't have
+		 * a packet with only empty segments.
+		 * Thus, tbi and gdesc have been initialized.
+		 */
 
 		/* set the last buf_info for the pkt */
 		tbi->m = txm;
@@ -1265,11 +1270,18 @@
 		for (j = 0; j < VMXNET3_RX_CMDRING_SIZE; j++) {
 			/* Passing 0 as alloc_num will allocate full ring */
 			ret = vmxnet3_post_rx_bufs(rxq, j);
-			if (ret <= 0) {
+
+			/* Zero number of descriptors in the configuration of the RX queue */
+			if (ret == 0) {
 				PMD_INIT_LOG(ERR,
-					     "ERROR: Posting Rxq: %d buffers ring: %d",
-					     i, j);
-				return -ret;
+					"Invalid configuration in Rx queue: %d, buffers ring: %d\n",
+					i, j);
+				return -EINVAL;
+			}
+			/* Return the error number */
+			if (ret < 0) {
+				PMD_INIT_LOG(ERR, "Posting Rxq: %d buffers ring: %d", i, j);
+				return ret;
 			}
 			/*
 			 * Updating device with the index:next2fill to fill the
diff -Nru dpdk-20.11.8/examples/fips_validation/main.c dpdk-20.11.9/examples/fips_validation/main.c
--- dpdk-20.11.8/examples/fips_validation/main.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/examples/fips_validation/main.c	2023-08-15 16:54:57.000000000 +0100
@@ -698,7 +698,7 @@
 			RTE_LOG(ERR, USER1, "Not enough memory\n");
 			return -ENOMEM;
 		}
-		env.digest_len = vec.cipher_auth.digest.len;
+		env.digest_len = vec.aead.digest.len;
 
 		sym->aead.data.length = vec.pt.len;
 		sym->aead.digest.data = env.digest;
@@ -707,7 +707,7 @@
 		ret = prepare_data_mbufs(&vec.ct);
 		if (ret < 0)
 			return ret;
-
+		env.digest_len = vec.aead.digest.len;
 		sym->aead.data.length = vec.ct.len;
 		sym->aead.digest.data = vec.aead.digest.val;
 		sym->aead.digest.phys_addr = rte_malloc_virt2iova(
@@ -1835,6 +1835,7 @@
 	if (env.digest) {
 		rte_free(env.digest);
 		env.digest = NULL;
+		env.digest_len = 0;
 	}
 	if (env.mbuf)
 		rte_pktmbuf_free(env.mbuf);
diff -Nru dpdk-20.11.8/examples/ip_pipeline/thread.c dpdk-20.11.9/examples/ip_pipeline/thread.c
--- dpdk-20.11.8/examples/ip_pipeline/thread.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/examples/ip_pipeline/thread.c	2023-08-15 16:54:57.000000000 +0100
@@ -432,7 +432,7 @@
 static inline struct thread_msg_req *
 thread_msg_recv(struct rte_ring *msgq_req)
 {
-	struct thread_msg_req *req;
+	struct thread_msg_req *req = NULL;
 
 	int status = rte_ring_sc_dequeue(msgq_req, (void **) &req);
 
diff -Nru dpdk-20.11.8/examples/ipsec-secgw/test/common_defs.sh dpdk-20.11.9/examples/ipsec-secgw/test/common_defs.sh
--- dpdk-20.11.8/examples/ipsec-secgw/test/common_defs.sh	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/examples/ipsec-secgw/test/common_defs.sh	2023-08-15 16:54:57.000000000 +0100
@@ -26,7 +26,7 @@
 
 LOCAL_IFACE=dtap0
 
-LOCAL_MAC="00:64:74:61:70:30"
+LOCAL_MAC="02:64:74:61:70:30"
 
 REMOTE_IPV4=192.168.31.14
 LOCAL_IPV4=192.168.31.92
diff -Nru dpdk-20.11.8/examples/l2fwd-cat/Makefile dpdk-20.11.9/examples/l2fwd-cat/Makefile
--- dpdk-20.11.8/examples/l2fwd-cat/Makefile	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/examples/l2fwd-cat/Makefile	2023-08-15 16:54:57.000000000 +0100
@@ -27,6 +27,7 @@
 LDFLAGS_STATIC = $(shell $(PKGCONF) --static --libs libdpdk)
 
 CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -D_GNU_SOURCE
 
 LDFLAGS += -lpqos
 
diff -Nru dpdk-20.11.8/examples/ntb/ntb_fwd.c dpdk-20.11.9/examples/ntb/ntb_fwd.c
--- dpdk-20.11.8/examples/ntb/ntb_fwd.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/examples/ntb/ntb_fwd.c	2023-08-15 16:54:57.000000000 +0100
@@ -865,7 +865,7 @@
 
 	/* Clear NTB dev stats */
 	nb_ids = rte_rawdev_xstats_names_get(dev_id, NULL, 0);
-	if (nb_ids  < 0) {
+	if (nb_ids <= 0) {
 		printf("Error: Cannot get count of xstats\n");
 		return;
 	}
@@ -923,7 +923,7 @@
 
 	/* Get NTB dev stats and stats names */
 	nb_ids = rte_rawdev_xstats_names_get(dev_id, NULL, 0);
-	if (nb_ids  < 0) {
+	if (nb_ids <= 0) {
 		printf("Error: Cannot get count of xstats\n");
 		return;
 	}
diff -Nru dpdk-20.11.8/kernel/freebsd/contigmem/contigmem.c dpdk-20.11.9/kernel/freebsd/contigmem/contigmem.c
--- dpdk-20.11.8/kernel/freebsd/contigmem/contigmem.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/kernel/freebsd/contigmem/contigmem.c	2023-08-15 16:54:57.000000000 +0100
@@ -111,7 +111,7 @@
 };
 
 static int
-contigmem_load()
+contigmem_load(void)
 {
 	char index_string[8], description[32];
 	int  i, error = 0;
@@ -178,7 +178,7 @@
 }
 
 static int
-contigmem_unload()
+contigmem_unload(void)
 {
 	int i;
 
diff -Nru dpdk-20.11.8/kernel/linux/kni/compat.h dpdk-20.11.9/kernel/linux/kni/compat.h
--- dpdk-20.11.8/kernel/linux/kni/compat.h	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/kernel/linux/kni/compat.h	2023-08-15 16:54:57.000000000 +0100
@@ -151,3 +151,7 @@
 	 RHEL_RELEASE_VERSION(9, 1) <= RHEL_RELEASE_CODE))
 #define HAVE_NETIF_RX_NI
 #endif
+
+#if KERNEL_VERSION(6, 5, 0) > LINUX_VERSION_CODE
+#define HAVE_VMA_IN_GUP
+#endif
diff -Nru dpdk-20.11.8/kernel/linux/kni/kni_dev.h dpdk-20.11.9/kernel/linux/kni/kni_dev.h
--- dpdk-20.11.8/kernel/linux/kni/kni_dev.h	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/kernel/linux/kni/kni_dev.h	2023-08-15 16:54:57.000000000 +0100
@@ -105,11 +105,13 @@
 
 	/* Read one page struct info */
 #ifdef HAVE_TSK_IN_GUP
-	ret = get_user_pages_remote(tsk, tsk->mm, iova, 1,
-				    FOLL_TOUCH, &page, NULL, NULL);
+	ret = get_user_pages_remote(tsk, tsk->mm, iova, 1, 0, &page, NULL, NULL);
 #else
-	ret = get_user_pages_remote(tsk->mm, iova, 1,
-				    FOLL_TOUCH, &page, NULL, NULL);
+  #ifdef HAVE_VMA_IN_GUP
+	ret = get_user_pages_remote(tsk->mm, iova, 1, 0, &page, NULL, NULL);
+  #else
+	ret = get_user_pages_remote(tsk->mm, iova, 1, 0, &page, NULL);
+  #endif
 #endif
 	if (ret < 0)
 		return 0;
diff -Nru dpdk-20.11.8/lib/librte_eal/common/eal_common_debug.c dpdk-20.11.9/lib/librte_eal/common/eal_common_debug.c
--- dpdk-20.11.8/lib/librte_eal/common/eal_common_debug.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/lib/librte_eal/common/eal_common_debug.c	2023-08-15 16:54:57.000000000 +0100
@@ -3,9 +3,12 @@
  */
 
 #include <stdarg.h>
+#include <errno.h>
+
 #include <rte_eal.h>
 #include <rte_log.h>
 #include <rte_debug.h>
+#include <rte_errno.h>
 
 void
 __rte_panic(const char *funcname, const char *format, ...)
@@ -37,7 +40,7 @@
 	rte_vlog(RTE_LOG_CRIT, RTE_LOGTYPE_EAL, format, ap);
 	va_end(ap);
 
-	if (rte_eal_cleanup() != 0)
+	if (rte_eal_cleanup() != 0 && rte_errno != EALREADY)
 		RTE_LOG(CRIT, EAL,
 			"EAL could not release all resources\n");
 	exit(exit_code);
diff -Nru dpdk-20.11.8/lib/librte_eal/common/eal_common_dynmem.c dpdk-20.11.9/lib/librte_eal/common/eal_common_dynmem.c
--- dpdk-20.11.8/lib/librte_eal/common/eal_common_dynmem.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/lib/librte_eal/common/eal_common_dynmem.c	2023-08-15 16:54:57.000000000 +0100
@@ -119,8 +119,7 @@
 	max_seglists_per_type = RTE_MAX_MEMSEG_LISTS / n_memtypes;
 
 	if (max_seglists_per_type == 0) {
-		RTE_LOG(ERR, EAL, "Cannot accommodate all memory types, please increase %s\n",
-			RTE_STR(RTE_MAX_MEMSEG_LISTS));
+		RTE_LOG(ERR, EAL, "Cannot accommodate all memory types, please increase RTE_MAX_MEMSEG_LISTS\n");
 		goto out;
 	}
 
@@ -179,8 +178,7 @@
 		for (cur_seglist = 0; cur_seglist < n_seglists; cur_seglist++) {
 			if (msl_idx >= RTE_MAX_MEMSEG_LISTS) {
 				RTE_LOG(ERR, EAL,
-					"No more space in memseg lists, please increase %s\n",
-					RTE_STR(RTE_MAX_MEMSEG_LISTS));
+					"No more space in memseg lists, please increase RTE_MAX_MEMSEG_LISTS\n");
 				goto out;
 			}
 			msl = &mcfg->memsegs[msl_idx++];
diff -Nru dpdk-20.11.8/lib/librte_eal/common/eal_common_proc.c dpdk-20.11.9/lib/librte_eal/common/eal_common_proc.c
--- dpdk-20.11.8/lib/librte_eal/common/eal_common_proc.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/lib/librte_eal/common/eal_common_proc.c	2023-08-15 16:54:57.000000000 +0100
@@ -324,6 +324,15 @@
 }
 
 static void
+cleanup_msg_fds(const struct rte_mp_msg *msg)
+{
+	int i;
+
+	for (i = 0; i < msg->num_fds; i++)
+		close(msg->fds[i]);
+}
+
+static void
 process_msg(struct mp_msg_internal *m, struct sockaddr_un *s)
 {
 	struct pending_request *pending_req;
@@ -351,8 +360,10 @@
 			else if (pending_req->type == REQUEST_TYPE_ASYNC)
 				req = async_reply_handle_thread_unsafe(
 						pending_req);
-		} else
+		} else {
 			RTE_LOG(ERR, EAL, "Drop mp reply: %s\n", msg->name);
+			cleanup_msg_fds(msg);
+		}
 		pthread_mutex_unlock(&pending_requests.lock);
 
 		if (req != NULL)
@@ -382,6 +393,7 @@
 			RTE_LOG(ERR, EAL, "Cannot find action: %s\n",
 				msg->name);
 		}
+		cleanup_msg_fds(msg);
 	} else if (action(msg, s->sun_path) < 0) {
 		RTE_LOG(ERR, EAL, "Fail to handle message: %s\n", msg->name);
 	}
diff -Nru dpdk-20.11.8/lib/librte_eal/freebsd/eal.c dpdk-20.11.9/lib/librte_eal/freebsd/eal.c
--- dpdk-20.11.8/lib/librte_eal/freebsd/eal.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/lib/librte_eal/freebsd/eal.c	2023-08-15 16:54:57.000000000 +0100
@@ -981,6 +981,16 @@
 int
 rte_eal_cleanup(void)
 {
+	static uint32_t run_once;
+	uint32_t has_run = 0;
+
+	if (!__atomic_compare_exchange_n(&run_once, &has_run, 1, 0,
+			__ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+		RTE_LOG(WARNING, EAL, "Already called cleanup\n");
+		rte_errno = EALREADY;
+		return -1;
+	}
+
 	struct internal_config *internal_conf =
 		eal_get_internal_configuration();
 	rte_service_finalize();
diff -Nru dpdk-20.11.8/lib/librte_eal/freebsd/eal_memory.c dpdk-20.11.9/lib/librte_eal/freebsd/eal_memory.c
--- dpdk-20.11.8/lib/librte_eal/freebsd/eal_memory.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/lib/librte_eal/freebsd/eal_memory.c	2023-08-15 16:54:57.000000000 +0100
@@ -172,9 +172,8 @@
 				break;
 			}
 			if (msl_idx == RTE_MAX_MEMSEG_LISTS) {
-				RTE_LOG(ERR, EAL, "Could not find space for memseg. Please increase %s and/or %s in configuration.\n",
-					RTE_STR(RTE_MAX_MEMSEG_PER_TYPE),
-					RTE_STR(RTE_MAX_MEM_MB_PER_TYPE));
+				RTE_LOG(ERR, EAL, "Could not find space for memseg. Please increase RTE_MAX_MEMSEG_PER_LIST "
+					"RTE_MAX_MEMSEG_PER_TYPE and/or RTE_MAX_MEM_MB_PER_TYPE in configuration.\n");
 				return -1;
 			}
 			arr = &msl->memseg_arr;
@@ -404,8 +403,7 @@
 
 			if (msl_idx >= RTE_MAX_MEMSEG_LISTS) {
 				RTE_LOG(ERR, EAL,
-					"No more space in memseg lists, please increase %s\n",
-					RTE_STR(RTE_MAX_MEMSEG_LISTS));
+					"No more space in memseg lists, please increase RTE_MAX_MEMSEG_LISTS\n");
 				return -1;
 			}
 
diff -Nru dpdk-20.11.8/lib/librte_eal/linux/eal.c dpdk-20.11.9/lib/librte_eal/linux/eal.c
--- dpdk-20.11.8/lib/librte_eal/linux/eal.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/lib/librte_eal/linux/eal.c	2023-08-15 16:54:57.000000000 +0100
@@ -1058,12 +1058,6 @@
 		}
 	}
 
-	/* register multi-process action callbacks for hotplug */
-	if (eal_mp_dev_hotplug_init() < 0) {
-		rte_eal_init_alert("failed to register mp callback for hotplug");
-		return -1;
-	}
-
 	if (rte_bus_scan()) {
 		rte_eal_init_alert("Cannot scan the buses for devices");
 		rte_errno = ENODEV;
@@ -1202,6 +1196,12 @@
 		return -1;
 	}
 
+	/* register multi-process action callbacks for hotplug after memory init */
+	if (eal_mp_dev_hotplug_init() < 0) {
+		rte_eal_init_alert("failed to register mp callback for hotplug");
+		return -1;
+	}
+
 	if (rte_eal_tailqs_init() < 0) {
 		rte_eal_init_alert("Cannot init tail queues for objects");
 		rte_errno = EFAULT;
@@ -1352,6 +1352,16 @@
 int
 rte_eal_cleanup(void)
 {
+	static uint32_t run_once;
+	uint32_t has_run = 0;
+
+	if (!__atomic_compare_exchange_n(&run_once, &has_run, 1, 0,
+					__ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+		RTE_LOG(WARNING, EAL, "Already called cleanup\n");
+		rte_errno = EALREADY;
+		return -1;
+	}
+
 	/* if we're in a primary process, we need to mark hugepages as freeable
 	 * so that finalization can release them back to the system.
 	 */
diff -Nru dpdk-20.11.8/lib/librte_eal/linux/eal_memory.c dpdk-20.11.9/lib/librte_eal/linux/eal_memory.c
--- dpdk-20.11.8/lib/librte_eal/linux/eal_memory.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/lib/librte_eal/linux/eal_memory.c	2023-08-15 16:54:57.000000000 +0100
@@ -686,6 +686,7 @@
 
 	/* find free space in memseg lists */
 	for (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS; msl_idx++) {
+		int free_len;
 		bool empty;
 		msl = &mcfg->memsegs[msl_idx];
 		arr = &msl->memseg_arr;
@@ -697,24 +698,31 @@
 
 		/* leave space for a hole if array is not empty */
 		empty = arr->count == 0;
-		ms_idx = rte_fbarray_find_next_n_free(arr, 0,
-				seg_len + (empty ? 0 : 1));
-
-		/* memseg list is full? */
+		/* find start of the biggest contiguous block and its size */
+		ms_idx = rte_fbarray_find_biggest_free(arr, 0);
 		if (ms_idx < 0)
 			continue;
-
+		/* hole is 1 segment long, so at least two segments long. */
+		free_len = rte_fbarray_find_contig_free(arr, ms_idx);
+		if (free_len < 2)
+			continue;
 		/* leave some space between memsegs, they are not IOVA
 		 * contiguous, so they shouldn't be VA contiguous either.
 		 */
-		if (!empty)
+		if (!empty) {
 			ms_idx++;
+			free_len--;
+		}
+
+		/* we might not get all of the space we wanted */
+		free_len = RTE_MIN(seg_len, free_len);
+		seg_end = seg_start + free_len;
+		seg_len = seg_end - seg_start;
 		break;
 	}
 	if (msl_idx == RTE_MAX_MEMSEG_LISTS) {
-		RTE_LOG(ERR, EAL, "Could not find space for memseg. Please increase %s and/or %s in configuration.\n",
-				RTE_STR(RTE_MAX_MEMSEG_PER_TYPE),
-				RTE_STR(RTE_MAX_MEM_MB_PER_TYPE));
+		RTE_LOG(ERR, EAL, "Could not find space for memseg. Please increase RTE_MAX_MEMSEG_PER_LIST "
+			"RTE_MAX_MEMSEG_PER_TYPE and/or RTE_MAX_MEM_MB_PER_TYPE in configuration.\n");
 		return -1;
 	}
 
@@ -792,7 +800,7 @@
 	}
 	RTE_LOG(DEBUG, EAL, "Allocated %" PRIu64 "M on socket %i\n",
 			(seg_len * page_sz) >> 20, socket_id);
-	return 0;
+	return seg_len;
 }
 
 static uint64_t
@@ -962,8 +970,7 @@
 				break;
 			}
 			if (msl_idx == RTE_MAX_MEMSEG_LISTS) {
-				RTE_LOG(ERR, EAL, "Not enough space in memseg lists, please increase %s\n",
-					RTE_STR(RTE_MAX_MEMSEG_LISTS));
+				RTE_LOG(ERR, EAL, "Not enough space in memseg lists, please increase RTE_MAX_MEMSEG_LISTS\n");
 				return -1;
 			}
 
@@ -1027,10 +1034,16 @@
 		if (new_memseg) {
 			/* if this isn't the first time, remap segment */
 			if (cur_page != 0) {
-				ret = remap_segment(hugepages, seg_start_page,
-						cur_page);
-				if (ret != 0)
-					return -1;
+				int n_remapped = 0;
+				int n_needed = cur_page - seg_start_page;
+				while (n_remapped < n_needed) {
+					ret = remap_segment(hugepages, seg_start_page,
+							cur_page);
+					if (ret < 0)
+						return -1;
+					n_remapped += ret;
+					seg_start_page += ret;
+				}
 			}
 			/* remember where we started */
 			seg_start_page = cur_page;
@@ -1039,10 +1052,16 @@
 	}
 	/* we were stopped, but we didn't remap the last segment, do it now */
 	if (cur_page != 0) {
-		ret = remap_segment(hugepages, seg_start_page,
-				cur_page);
-		if (ret != 0)
-			return -1;
+		int n_remapped = 0;
+		int n_needed = cur_page - seg_start_page;
+		while (n_remapped < n_needed) {
+			ret = remap_segment(hugepages, seg_start_page,
+					cur_page);
+			if (ret < 0)
+				return -1;
+			n_remapped += ret;
+			seg_start_page += ret;
+		}
 	}
 	return 0;
 }
@@ -1817,8 +1836,7 @@
 
 				if (msl_idx >= RTE_MAX_MEMSEG_LISTS) {
 					RTE_LOG(ERR, EAL,
-						"No more space in memseg lists, please increase %s\n",
-						RTE_STR(RTE_MAX_MEMSEG_LISTS));
+						"No more space in memseg lists, please increase RTE_MAX_MEMSEG_LISTS\n");
 					return -1;
 				}
 
diff -Nru dpdk-20.11.8/lib/librte_eal/x86/include/rte_memcpy.h dpdk-20.11.9/lib/librte_eal/x86/include/rte_memcpy.h
--- dpdk-20.11.8/lib/librte_eal/x86/include/rte_memcpy.h	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/lib/librte_eal/x86/include/rte_memcpy.h	2023-08-15 16:54:57.000000000 +0100
@@ -846,7 +846,7 @@
 	}
 
 	/* Copy 64 bytes blocks */
-	for (; n >= 64; n -= 64) {
+	for (; n > 64; n -= 64) {
 		rte_mov64((uint8_t *)dst, (const uint8_t *)src);
 		dst = (uint8_t *)dst + 64;
 		src = (const uint8_t *)src + 64;
diff -Nru dpdk-20.11.8/lib/librte_ethdev/rte_ethdev.c dpdk-20.11.9/lib/librte_ethdev/rte_ethdev.c
--- dpdk-20.11.8/lib/librte_ethdev/rte_ethdev.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/lib/librte_ethdev/rte_ethdev.c	2023-08-15 16:54:57.000000000 +0100
@@ -4032,6 +4032,12 @@
 
 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
 	dev = &rte_eth_devices[port_id];
+
+	if (fec_capa == 0) {
+		RTE_ETHDEV_LOG(ERR, "At least one FEC mode should be specified\n");
+		return -EINVAL;
+	}
+
 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_set, -ENOTSUP);
 	return eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa));
 }
@@ -4150,6 +4156,7 @@
 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr)
 {
 	struct rte_eth_dev *dev;
+	int index;
 	int ret;
 
 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
@@ -4160,6 +4167,15 @@
 	dev = &rte_eth_devices[port_id];
 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
 
+	/* Keep address unique in dev->data->mac_addrs[]. */
+	index = eth_dev_get_mac_addr_index(port_id, addr);
+	if (index > 0) {
+		RTE_ETHDEV_LOG(ERR,
+			"New default address for port %u was already in the address list. Please remove it first.\n",
+			port_id);
+		return -EEXIST;
+	}
+
 	ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
 	if (ret < 0)
 		return ret;
diff -Nru dpdk-20.11.8/lib/librte_ethdev/rte_ethdev.h dpdk-20.11.9/lib/librte_ethdev/rte_ethdev.h
--- dpdk-20.11.8/lib/librte_ethdev/rte_ethdev.h	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/lib/librte_ethdev/rte_ethdev.h	2023-08-15 16:54:57.000000000 +0100
@@ -3713,10 +3713,7 @@
  * @param port_id
  *   The port identifier of the Ethernet device.
  * @param fec_capa
- *   A bitmask of enabled FEC modes. If AUTO bit is set, other
- *   bits specify FEC modes which may be negotiated. If AUTO
- *   bit is clear, specify FEC modes to be used (only one valid
- *   mode per speed may be set).
+ *   A bitmask with the current FEC mode.
  * @return
  *   - (0) if successful.
  *   - (-ENOTSUP) if underlying hardware OR driver doesn't support.
@@ -3736,10 +3733,13 @@
  * @param port_id
  *   The port identifier of the Ethernet device.
  * @param fec_capa
- *   A bitmask of allowed FEC modes. If AUTO bit is set, other
- *   bits specify FEC modes which may be negotiated. If AUTO
- *   bit is clear, specify FEC modes to be used (only one valid
- *   mode per speed may be set).
+ *   A bitmask of allowed FEC modes.
+ *   If only the AUTO bit is set, the decision on which FEC
+ *   mode to use will be made by HW/FW or driver.
+ *   If the AUTO bit is set with some FEC modes, only specified
+ *   FEC modes can be set.
+ *   If AUTO bit is clear, specify FEC mode to be used
+ *   (only one valid mode per speed may be set).
  * @return
  *   - (0) if successful.
  *   - (-EINVAL) if the FEC mode is not valid.
@@ -3840,6 +3840,9 @@
 
 /**
  * Set the default MAC address.
+ * It replaces the address at index 0 of the MAC address list.
+ * If the address was already in the MAC address list,
+ * please remove it first.
  *
  * @param port_id
  *   The port identifier of the Ethernet device.
@@ -3850,6 +3853,7 @@
  *   - (-ENOTSUP) if hardware doesn't support.
  *   - (-ENODEV) if *port* invalid.
  *   - (-EINVAL) if MAC address is invalid.
+ *   - (-EEXIST) if MAC address was already in the address list.
  */
 int rte_eth_dev_default_mac_addr_set(uint16_t port_id,
 		struct rte_ether_addr *mac_addr);
diff -Nru dpdk-20.11.8/lib/librte_ethdev/rte_ethdev_pci.h dpdk-20.11.9/lib/librte_ethdev/rte_ethdev_pci.h
--- dpdk-20.11.8/lib/librte_ethdev/rte_ethdev_pci.h	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/lib/librte_ethdev/rte_ethdev_pci.h	2023-08-15 16:54:57.000000000 +0100
@@ -126,6 +126,9 @@
 	struct rte_eth_dev *eth_dev;
 	int ret;
 
+	if (*dev_init == NULL)
+		return -EINVAL;
+
 	eth_dev = rte_eth_dev_pci_allocate(pci_dev, private_data_size);
 	if (!eth_dev)
 		return -ENOMEM;
diff -Nru dpdk-20.11.8/lib/librte_eventdev/rte_event_timer_adapter.c dpdk-20.11.9/lib/librte_eventdev/rte_event_timer_adapter.c
--- dpdk-20.11.8/lib/librte_eventdev/rte_event_timer_adapter.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/lib/librte_eventdev/rte_event_timer_adapter.c	2023-08-15 16:54:57.000000000 +0100
@@ -765,17 +765,18 @@
 				     sw->n_expired_timers);
 		sw->n_expired_timers = 0;
 
-		event_buffer_flush(&sw->buffer,
-				   adapter->data->event_dev_id,
-				   adapter->data->event_port_id,
-				   &nb_evs_flushed,
-				   &nb_evs_invalid);
-
-		sw->stats.ev_enq_count += nb_evs_flushed;
-		sw->stats.ev_inv_count += nb_evs_invalid;
 		sw->stats.adapter_tick_count++;
 	}
 
+	event_buffer_flush(&sw->buffer,
+			   adapter->data->event_dev_id,
+			   adapter->data->event_port_id,
+			   &nb_evs_flushed,
+			   &nb_evs_invalid);
+
+	sw->stats.ev_enq_count += nb_evs_flushed;
+	sw->stats.ev_inv_count += nb_evs_invalid;
+
 	return 0;
 }
 
diff -Nru dpdk-20.11.8/lib/librte_fib/dir24_8.c dpdk-20.11.9/lib/librte_fib/dir24_8.c
--- dpdk-20.11.8/lib/librte_fib/dir24_8.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/lib/librte_fib/dir24_8.c	2023-08-15 16:54:57.000000000 +0100
@@ -394,7 +394,7 @@
 				(uint32_t)(1ULL << (32 - tmp_depth));
 		} else {
 			redge = ip + (uint32_t)(1ULL << (32 - depth));
-			if (ledge == redge)
+			if (ledge == redge && ledge != 0)
 				break;
 			ret = install_to_fib(dp, ledge, redge,
 				next_hop);
diff -Nru dpdk-20.11.8/lib/librte_mbuf/rte_mbuf_core.h dpdk-20.11.9/lib/librte_mbuf/rte_mbuf_core.h
--- dpdk-20.11.8/lib/librte_mbuf/rte_mbuf_core.h	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/lib/librte_mbuf/rte_mbuf_core.h	2023-08-15 16:54:57.000000000 +0100
@@ -582,8 +582,8 @@
 				 * @see rte_event_eth_tx_adapter_txq_set()
 				 */
 			} txadapter; /**< Eventdev ethdev Tx adapter */
-			/**< User defined tags. See rte_distributor_process() */
 			uint32_t usr;
+			/**< User defined tags. See rte_distributor_process() */
 		} hash;                   /**< hash information */
 	};
 
diff -Nru dpdk-20.11.8/lib/librte_pci/rte_pci.h dpdk-20.11.9/lib/librte_pci/rte_pci.h
--- dpdk-20.11.8/lib/librte_pci/rte_pci.h	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/lib/librte_pci/rte_pci.h	2023-08-15 16:54:57.000000000 +0100
@@ -100,8 +100,7 @@
 
 /**
  * Utility function to write a pci device name, this device name can later be
- * used to retrieve the corresponding rte_pci_addr using eal_parse_pci_*
- * BDF helpers.
+ * used to retrieve the corresponding rte_pci_addr using rte_pci_addr_parse().
  *
  * @param addr
  *	The PCI Bus-Device-Function address
diff -Nru dpdk-20.11.8/lib/librte_ring/rte_ring.c dpdk-20.11.9/lib/librte_ring/rte_ring.c
--- dpdk-20.11.8/lib/librte_ring/rte_ring.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/lib/librte_ring/rte_ring.c	2023-08-15 16:54:57.000000000 +0100
@@ -341,11 +341,6 @@
 		return;
 	}
 
-	if (rte_memzone_free(r->memzone) != 0) {
-		RTE_LOG(ERR, RING, "Cannot free memory\n");
-		return;
-	}
-
 	ring_list = RTE_TAILQ_CAST(rte_ring_tailq.head, rte_ring_list);
 	rte_mcfg_tailq_write_lock();
 
@@ -364,6 +359,9 @@
 
 	rte_mcfg_tailq_write_unlock();
 
+	if (rte_memzone_free(r->memzone) != 0)
+		RTE_LOG(ERR, RING, "Cannot free memory\n");
+
 	rte_free(te);
 }
 
diff -Nru dpdk-20.11.8/lib/librte_telemetry/telemetry.c dpdk-20.11.9/lib/librte_telemetry/telemetry.c
--- dpdk-20.11.8/lib/librte_telemetry/telemetry.c	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/lib/librte_telemetry/telemetry.c	2023-08-15 16:54:57.000000000 +0100
@@ -217,7 +217,11 @@
 				break;
 			case RTE_TEL_CONTAINER:
 			{
-				char temp[buf_len];
+				char *temp = malloc(buf_len);
+				if (temp == NULL)
+					break;
+				*temp = '\0';  /* ensure valid string */
+
 				const struct container *cont =
 						&v->value.container;
 				if (container_to_json(cont->data,
@@ -228,6 +232,7 @@
 							v->name, temp);
 				if (!cont->keep)
 					rte_tel_data_free(cont->data);
+				free(temp);
 			}
 			}
 		}
@@ -259,7 +264,11 @@
 						buf_len, used,
 						d->data.array[i].u64val);
 			else if (d->type == RTE_TEL_ARRAY_CONTAINER) {
-				char temp[buf_len];
+				char *temp = malloc(buf_len);
+				if (temp == NULL)
+					break;
+				*temp = '\0';  /* ensure valid string */
+
 				const struct container *rec_data =
 						&d->data.array[i].container;
 				if (container_to_json(rec_data->data,
@@ -269,6 +278,7 @@
 							buf_len, used, temp);
 				if (!rec_data->keep)
 					rte_tel_data_free(rec_data->data);
+				free(temp);
 			}
 		used += prefix_used;
 		used += strlcat(out_buf + used, "}", sizeof(out_buf) - used);
diff -Nru dpdk-20.11.8/lib/librte_vhost/vhost.h dpdk-20.11.9/lib/librte_vhost/vhost.h
--- dpdk-20.11.8/lib/librte_vhost/vhost.h	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/lib/librte_vhost/vhost.h	2023-08-15 16:54:57.000000000 +0100
@@ -764,9 +764,9 @@
 			vhost_used_event(vq),
 			old, new);
 
-		if ((vhost_need_event(vhost_used_event(vq), new, old) &&
-					(vq->callfd >= 0)) ||
-				unlikely(!signalled_used_valid)) {
+		if ((vhost_need_event(vhost_used_event(vq), new, old) ||
+					unlikely(!signalled_used_valid)) &&
+				vq->callfd >= 0) {
 			eventfd_write(vq->callfd, (eventfd_t) 1);
 			if (dev->notify_ops->guest_notified)
 				dev->notify_ops->guest_notified(dev->vid);
@@ -829,7 +829,7 @@
 	if (vhost_need_event(off, new, old))
 		kick = true;
 kick:
-	if (kick) {
+	if (kick && vq->callfd >= 0) {
 		eventfd_write(vq->callfd, (eventfd_t)1);
 		if (dev->notify_ops->guest_notified)
 			dev->notify_ops->guest_notified(dev->vid);
diff -Nru dpdk-20.11.8/meson.build dpdk-20.11.9/meson.build
--- dpdk-20.11.8/meson.build	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/meson.build	2023-08-15 16:54:57.000000000 +0100
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: BSD-3-Clause
 # Copyright(c) 2017-2019 Intel Corporation
 
-project('DPDK', 'C',
+project('DPDK', 'c',
 	# Get version number from file.
 	# Fallback to "more" for Windows compatibility.
 	version: run_command(find_program('cat', 'more'),
diff -Nru dpdk-20.11.8/VERSION dpdk-20.11.9/VERSION
--- dpdk-20.11.8/VERSION	2023-04-27 18:57:22.000000000 +0100
+++ dpdk-20.11.9/VERSION	2023-08-15 16:54:57.000000000 +0100
@@ -1 +1 @@
-20.11.8
+20.11.9


