
Bug#896689: marked as done (stretch-pu: package dpdk/16.11.6+deb9u1)



Your message dated Sat, 14 Jul 2018 11:21:20 +0100
with message-id <1531563680.2095.30.camel@adam-barratt.org.uk>
and subject line Closing bugs for updates included in 9.5
has caused the Debian Bug report #896689,
regarding stretch-pu: package dpdk/16.11.6+deb9u1
to be marked as done.

This means that you claim that the problem has been dealt with.
If this is not the case it is now your responsibility to reopen the
Bug report if necessary, and/or fix the problem forthwith.

(NB: If you are a system administrator and have no idea what this
message is talking about, this may indicate a serious mail system
misconfiguration somewhere. Please contact owner@bugs.debian.org
immediately.)


-- 
896689: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=896689
Debian Bug Tracking System
Contact owner@bugs.debian.org with problems
--- Begin Message ---
Package: release.debian.org
Severity: normal
Tags: stretch
User: release.debian.org@packages.debian.org
Usertags: pu

Dear release team,

We would like to upload a new LTS release of DPDK to Stretch. We have
already done this for 16.11.4 and it was approved [1], so I have
proceeded to upload to stretch-pu in accordance with the new workflow
recently announced on debian-devel-announce [2].

As with [1], this LTS point release contains only bug fixes and no API
changes; it has been tested extensively, including regression test runs,
and deployed on Debian Stretch.

More importantly, this point release fixes CVE-2018-1059 / #896688 [3]
for which the Security Team has previously decided not to do a DSA.

The source debdiff is attached. The only packaging change is the removal
of two patches that have been merged upstream; the rest is upstream code
from the new LTS version.
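
For reference, an update like this is typically assembled roughly as
follows (a sketch only; the .dsc file names follow the standard naming
and are not copied from the actual upload):

  # after importing the 16.11.6 upstream tarball, drop the two patches
  # now merged upstream from debian/patches/series
  quilt delete dpdk-dev-v3-eal-sPAPR-IOMMU-support-in-pci-probing-for-vfio-pci-in-ppc64le.patch
  quilt delete igb_uio-switch-to-new-irq-function-for-MSI-X.patch

  # record the new version in debian/changelog and build the source package
  dch --newversion 16.11.6-1+deb9u1 --distribution stretch "Merge stable update to 16.11.6"
  dpkg-buildpackage -S

  # generate the source debdiff against the previous stretch upload
  debdiff dpdk_16.11.4-1+deb9u1.dsc dpdk_16.11.6-1+deb9u1.dsc > dpdk_16.11.6-1+deb9u1.debdiff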

-- 
Kind regards,
Luca Boccassi

[1] https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=884711
[2] https://lists.debian.org/debian-devel-announce/2018/04/msg00007.html
[3] https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=896688
diff -Nru dpdk-16.11.4/app/Makefile dpdk-16.11.6/app/Makefile
--- dpdk-16.11.4/app/Makefile	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/app/Makefile	2018-04-19 15:01:06.000000000 +0100
@@ -36,7 +36,7 @@
 DIRS-$(CONFIG_RTE_LIBRTE_PIPELINE) += test-pipeline
 DIRS-$(CONFIG_RTE_TEST_PMD) += test-pmd
 DIRS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_test
-DIRS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += proc_info
+DIRS-$(CONFIG_RTE_PROC_INFO) += proc_info
 DIRS-$(CONFIG_RTE_LIBRTE_PDUMP) += pdump
 
 include $(RTE_SDK)/mk/rte.subdir.mk
diff -Nru dpdk-16.11.4/app/test/test.c dpdk-16.11.6/app/test/test.c
--- dpdk-16.11.4/app/test/test.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/app/test/test.c	2018-04-19 15:01:06.000000000 +0100
@@ -165,8 +165,20 @@
 	}
 
 	if (suite->setup)
-		if (suite->setup() != 0)
+		if (suite->setup() != 0) {
+			/*
+			 * setup failed, so count all enabled tests and mark
+			 * them as failed
+			 */
+			while (suite->unit_test_cases[total].testcase) {
+				if (!suite->unit_test_cases[total].enabled)
+					skipped++;
+				else
+					failed++;
+				total++;
+			}
 			goto suite_summary;
+		}
 
 	printf(" + ------------------------------------------------------- +\n");
 
diff -Nru dpdk-16.11.4/app/test/test_cryptodev.c dpdk-16.11.6/app/test/test_cryptodev.c
--- dpdk-16.11.4/app/test/test_cryptodev.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/app/test/test_cryptodev.c	2018-04-19 15:01:06.000000000 +0100
@@ -30,6 +30,8 @@
  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <time.h>
+
 #include <rte_common.h>
 #include <rte_hexdump.h>
 #include <rte_mbuf.h>
diff -Nru dpdk-16.11.4/app/test/test_memzone.c dpdk-16.11.6/app/test/test_memzone.c
--- dpdk-16.11.4/app/test/test_memzone.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/app/test/test_memzone.c	2018-04-19 15:01:06.000000000 +0100
@@ -33,6 +33,7 @@
 
 #include <stdio.h>
 #include <stdint.h>
+#include <string.h>
 #include <inttypes.h>
 #include <sys/queue.h>
 
@@ -76,6 +77,8 @@
  * - Check flags for specific huge page size reservation
  */
 
+#define TEST_MEMZONE_NAME(suffix) "MZ_TEST_" suffix
+
 /* Test if memory overlaps: return 1 if true, or 0 if false. */
 static int
 is_memory_overlap(phys_addr_t ptr1, size_t len1, phys_addr_t ptr2, size_t len2)
@@ -92,14 +95,14 @@
 {
 	const struct rte_memzone * mz;
 
-	mz = rte_memzone_lookup("invalid_alignment");
+	mz = rte_memzone_lookup(TEST_MEMZONE_NAME("invalid_alignment"));
 	if (mz != NULL) {
 		printf("Zone with invalid alignment has been reserved\n");
 		return -1;
 	}
 
-	mz = rte_memzone_reserve_aligned("invalid_alignment", 100,
-			SOCKET_ID_ANY, 0, 100);
+	mz = rte_memzone_reserve_aligned(TEST_MEMZONE_NAME("invalid_alignment"),
+					 100, SOCKET_ID_ANY, 0, 100);
 	if (mz != NULL) {
 		printf("Zone with invalid alignment has been reserved\n");
 		return -1;
@@ -112,14 +115,16 @@
 {
 	const struct rte_memzone * mz;
 
-	mz = rte_memzone_lookup("zone_size_bigger_than_the_maximum");
+	mz = rte_memzone_lookup(
+			TEST_MEMZONE_NAME("zone_size_bigger_than_the_maximum"));
 	if (mz != NULL) {
 		printf("zone_size_bigger_than_the_maximum has been reserved\n");
 		return -1;
 	}
 
-	mz = rte_memzone_reserve("zone_size_bigger_than_the_maximum", (size_t)-1,
-			SOCKET_ID_ANY, 0);
+	mz = rte_memzone_reserve(
+			TEST_MEMZONE_NAME("zone_size_bigger_than_the_maximum"),
+			(size_t)-1, SOCKET_ID_ANY, 0);
 	if (mz != NULL) {
 		printf("It is impossible to reserve such big a memzone\n");
 		return -1;
@@ -166,8 +171,8 @@
 	 * available page size (i.e 1GB ) when 2MB pages are unavailable.
 	 */
 	if (hugepage_2MB_avail) {
-		mz = rte_memzone_reserve("flag_zone_2M", size, SOCKET_ID_ANY,
-				RTE_MEMZONE_2MB);
+		mz = rte_memzone_reserve(TEST_MEMZONE_NAME("flag_zone_2M"),
+				size, SOCKET_ID_ANY, RTE_MEMZONE_2MB);
 		if (mz == NULL) {
 			printf("MEMZONE FLAG 2MB\n");
 			return -1;
@@ -181,7 +186,8 @@
 			return -1;
 		}
 
-		mz = rte_memzone_reserve("flag_zone_2M_HINT", size, SOCKET_ID_ANY,
+		mz = rte_memzone_reserve(TEST_MEMZONE_NAME("flag_zone_2M_HINT"),
+				size, SOCKET_ID_ANY,
 				RTE_MEMZONE_2MB|RTE_MEMZONE_SIZE_HINT_ONLY);
 		if (mz == NULL) {
 			printf("MEMZONE FLAG 2MB\n");
@@ -200,7 +206,9 @@
 		 * HINT flag is indicated
 		 */
 		if (!hugepage_1GB_avail) {
-			mz = rte_memzone_reserve("flag_zone_1G_HINT", size, SOCKET_ID_ANY,
+			mz = rte_memzone_reserve(
+					TEST_MEMZONE_NAME("flag_zone_1G_HINT"),
+					size, SOCKET_ID_ANY,
 					RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY);
 			if (mz == NULL) {
 				printf("MEMZONE FLAG 1GB & HINT\n");
@@ -215,8 +223,9 @@
 				return -1;
 			}
 
-			mz = rte_memzone_reserve("flag_zone_1G", size, SOCKET_ID_ANY,
-					RTE_MEMZONE_1GB);
+			mz = rte_memzone_reserve(
+					TEST_MEMZONE_NAME("flag_zone_1G"), size,
+					SOCKET_ID_ANY, RTE_MEMZONE_1GB);
 			if (mz != NULL) {
 				printf("MEMZONE FLAG 1GB\n");
 				return -1;
@@ -226,8 +235,8 @@
 
 	/*As with 2MB tests above for 1GB huge page requests*/
 	if (hugepage_1GB_avail) {
-		mz = rte_memzone_reserve("flag_zone_1G", size, SOCKET_ID_ANY,
-				RTE_MEMZONE_1GB);
+		mz = rte_memzone_reserve(TEST_MEMZONE_NAME("flag_zone_1G"),
+				size, SOCKET_ID_ANY, RTE_MEMZONE_1GB);
 		if (mz == NULL) {
 			printf("MEMZONE FLAG 1GB\n");
 			return -1;
@@ -241,7 +250,8 @@
 			return -1;
 		}
 
-		mz = rte_memzone_reserve("flag_zone_1G_HINT", size, SOCKET_ID_ANY,
+		mz = rte_memzone_reserve(TEST_MEMZONE_NAME("flag_zone_1G_HINT"),
+				size, SOCKET_ID_ANY,
 				RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY);
 		if (mz == NULL) {
 			printf("MEMZONE FLAG 1GB\n");
@@ -260,7 +270,9 @@
 		 * HINT flag is indicated
 		 */
 		if (!hugepage_2MB_avail) {
-			mz = rte_memzone_reserve("flag_zone_2M_HINT", size, SOCKET_ID_ANY,
+			mz = rte_memzone_reserve(
+					TEST_MEMZONE_NAME("flag_zone_2M_HINT"),
+					size, SOCKET_ID_ANY,
 					RTE_MEMZONE_2MB|RTE_MEMZONE_SIZE_HINT_ONLY);
 			if (mz == NULL){
 				printf("MEMZONE FLAG 2MB & HINT\n");
@@ -274,25 +286,33 @@
 				printf("Fail memzone free\n");
 				return -1;
 			}
-			mz = rte_memzone_reserve("flag_zone_2M", size, SOCKET_ID_ANY,
-					RTE_MEMZONE_2MB);
+			mz = rte_memzone_reserve(
+					TEST_MEMZONE_NAME("flag_zone_2M"), size,
+					SOCKET_ID_ANY, RTE_MEMZONE_2MB);
 			if (mz != NULL) {
 				printf("MEMZONE FLAG 2MB\n");
 				return -1;
 			}
-			if (rte_memzone_free(mz)) {
-				printf("Fail memzone free\n");
-				return -1;
-			}
 		}
 
 		if (hugepage_2MB_avail && hugepage_1GB_avail) {
-			mz = rte_memzone_reserve("flag_zone_2M_HINT", size, SOCKET_ID_ANY,
-								RTE_MEMZONE_2MB|RTE_MEMZONE_1GB);
-			if (mz != NULL) {
+			mz = rte_memzone_reserve(
+					TEST_MEMZONE_NAME("flag_zone_2M_HINT"),
+					size, SOCKET_ID_ANY,
+					RTE_MEMZONE_2MB|RTE_MEMZONE_1GB);
+			if (mz == NULL) {
 				printf("BOTH SIZES SET\n");
 				return -1;
 			}
+			if (mz->hugepage_sz != RTE_PGSIZE_1G &&
+					mz->hugepage_sz != RTE_PGSIZE_2M) {
+				printf("Wrong size when both sizes set\n");
+				return -1;
+			}
+			if (rte_memzone_free(mz)) {
+				printf("Fail memzone free\n");
+				return -1;
+			}
 		}
 	}
 	/*
@@ -303,8 +323,8 @@
 	 * page size (i.e 16GB ) when 16MB pages are unavailable.
 	 */
 	if (hugepage_16MB_avail) {
-		mz = rte_memzone_reserve("flag_zone_16M", size, SOCKET_ID_ANY,
-				RTE_MEMZONE_16MB);
+		mz = rte_memzone_reserve(TEST_MEMZONE_NAME("flag_zone_16M"),
+				size, SOCKET_ID_ANY, RTE_MEMZONE_16MB);
 		if (mz == NULL) {
 			printf("MEMZONE FLAG 16MB\n");
 			return -1;
@@ -318,8 +338,10 @@
 			return -1;
 		}
 
-		mz = rte_memzone_reserve("flag_zone_16M_HINT", size,
-		SOCKET_ID_ANY, RTE_MEMZONE_16MB|RTE_MEMZONE_SIZE_HINT_ONLY);
+		mz = rte_memzone_reserve(
+				TEST_MEMZONE_NAME("flag_zone_16M_HINT"), size,
+				SOCKET_ID_ANY,
+				RTE_MEMZONE_16MB|RTE_MEMZONE_SIZE_HINT_ONLY);
 		if (mz == NULL) {
 			printf("MEMZONE FLAG 2MB\n");
 			return -1;
@@ -337,9 +359,11 @@
 		 * unless HINT flag is indicated
 		 */
 		if (!hugepage_16GB_avail) {
-			mz = rte_memzone_reserve("flag_zone_16G_HINT", size,
-				SOCKET_ID_ANY,
-				RTE_MEMZONE_16GB|RTE_MEMZONE_SIZE_HINT_ONLY);
+			mz = rte_memzone_reserve(
+					TEST_MEMZONE_NAME("flag_zone_16G_HINT"),
+					size, SOCKET_ID_ANY,
+					RTE_MEMZONE_16GB |
+					RTE_MEMZONE_SIZE_HINT_ONLY);
 			if (mz == NULL) {
 				printf("MEMZONE FLAG 16GB & HINT\n");
 				return -1;
@@ -353,8 +377,10 @@
 				return -1;
 			}
 
-			mz = rte_memzone_reserve("flag_zone_16G", size,
-				SOCKET_ID_ANY, RTE_MEMZONE_16GB);
+			mz = rte_memzone_reserve(
+					TEST_MEMZONE_NAME("flag_zone_16G"),
+					size,
+					SOCKET_ID_ANY, RTE_MEMZONE_16GB);
 			if (mz != NULL) {
 				printf("MEMZONE FLAG 16GB\n");
 				return -1;
@@ -363,8 +389,8 @@
 	}
 	/*As with 16MB tests above for 16GB huge page requests*/
 	if (hugepage_16GB_avail) {
-		mz = rte_memzone_reserve("flag_zone_16G", size, SOCKET_ID_ANY,
-				RTE_MEMZONE_16GB);
+		mz = rte_memzone_reserve(TEST_MEMZONE_NAME("flag_zone_16G"),
+				size, SOCKET_ID_ANY, RTE_MEMZONE_16GB);
 		if (mz == NULL) {
 			printf("MEMZONE FLAG 16GB\n");
 			return -1;
@@ -378,8 +404,10 @@
 			return -1;
 		}
 
-		mz = rte_memzone_reserve("flag_zone_16G_HINT", size,
-		SOCKET_ID_ANY, RTE_MEMZONE_16GB|RTE_MEMZONE_SIZE_HINT_ONLY);
+		mz = rte_memzone_reserve(
+				TEST_MEMZONE_NAME("flag_zone_16G_HINT"), size,
+				SOCKET_ID_ANY,
+				RTE_MEMZONE_16GB|RTE_MEMZONE_SIZE_HINT_ONLY);
 		if (mz == NULL) {
 			printf("MEMZONE FLAG 16GB\n");
 			return -1;
@@ -397,9 +425,11 @@
 		 * unless HINT flag is indicated
 		 */
 		if (!hugepage_16MB_avail) {
-			mz = rte_memzone_reserve("flag_zone_16M_HINT", size,
-				SOCKET_ID_ANY,
-				RTE_MEMZONE_16MB|RTE_MEMZONE_SIZE_HINT_ONLY);
+			mz = rte_memzone_reserve(
+					TEST_MEMZONE_NAME("flag_zone_16M_HINT"),
+					size, SOCKET_ID_ANY,
+					RTE_MEMZONE_16MB |
+					RTE_MEMZONE_SIZE_HINT_ONLY);
 			if (mz == NULL) {
 				printf("MEMZONE FLAG 16MB & HINT\n");
 				return -1;
@@ -412,8 +442,9 @@
 				printf("Fail memzone free\n");
 				return -1;
 			}
-			mz = rte_memzone_reserve("flag_zone_16M", size,
-				SOCKET_ID_ANY, RTE_MEMZONE_16MB);
+			mz = rte_memzone_reserve(
+					TEST_MEMZONE_NAME("flag_zone_16M"),
+					size, SOCKET_ID_ANY, RTE_MEMZONE_16MB);
 			if (mz != NULL) {
 				printf("MEMZONE FLAG 16MB\n");
 				return -1;
@@ -421,13 +452,23 @@
 		}
 
 		if (hugepage_16MB_avail && hugepage_16GB_avail) {
-			mz = rte_memzone_reserve("flag_zone_16M_HINT", size,
-				SOCKET_ID_ANY,
-				RTE_MEMZONE_16MB|RTE_MEMZONE_16GB);
-			if (mz != NULL) {
+			mz = rte_memzone_reserve(
+					TEST_MEMZONE_NAME("flag_zone_16M_HINT"),
+					size, SOCKET_ID_ANY,
+					RTE_MEMZONE_16MB|RTE_MEMZONE_16GB);
+			if (mz == NULL) {
 				printf("BOTH SIZES SET\n");
 				return -1;
 			}
+			if (mz->hugepage_sz != RTE_PGSIZE_16G &&
+					mz->hugepage_sz != RTE_PGSIZE_16M) {
+				printf("Wrong size when both sizes set\n");
+				return -1;
+			}
+			if (rte_memzone_free(mz)) {
+				printf("Fail memzone free\n");
+				return -1;
+			}
 		}
 	}
 	return 0;
@@ -470,7 +511,8 @@
 		return 0;
 	}
 
-	mz = rte_memzone_reserve("max_zone", 0, SOCKET_ID_ANY, 0);
+	mz = rte_memzone_reserve(TEST_MEMZONE_NAME("max_zone"), 0,
+			SOCKET_ID_ANY, 0);
 	if (mz == NULL){
 		printf("Failed to reserve a big chunk of memory - %s\n",
 				rte_strerror(rte_errno));
@@ -512,8 +554,8 @@
 		return 0;
 	}
 
-	mz = rte_memzone_reserve_aligned("max_zone_aligned", 0,
-			SOCKET_ID_ANY, 0, align);
+	mz = rte_memzone_reserve_aligned(TEST_MEMZONE_NAME("max_zone_aligned"),
+			0, SOCKET_ID_ANY, 0, align);
 	if (mz == NULL){
 		printf("Failed to reserve a big chunk of memory - %s\n",
 				rte_strerror(rte_errno));
@@ -550,24 +592,29 @@
 	const struct rte_memzone *memzone_aligned_1024;
 
 	/* memzone that should automatically be adjusted to align on 64 bytes */
-	memzone_aligned_32 = rte_memzone_reserve_aligned("aligned_32", 100,
-				SOCKET_ID_ANY, 0, 32);
+	memzone_aligned_32 = rte_memzone_reserve_aligned(
+			TEST_MEMZONE_NAME("aligned_32"), 100, SOCKET_ID_ANY, 0,
+			32);
 
 	/* memzone that is supposed to be aligned on a 128 byte boundary */
-	memzone_aligned_128 = rte_memzone_reserve_aligned("aligned_128", 100,
-				SOCKET_ID_ANY, 0, 128);
+	memzone_aligned_128 = rte_memzone_reserve_aligned(
+			TEST_MEMZONE_NAME("aligned_128"), 100, SOCKET_ID_ANY, 0,
+			128);
 
 	/* memzone that is supposed to be aligned on a 256 byte boundary */
-	memzone_aligned_256 = rte_memzone_reserve_aligned("aligned_256", 100,
-				SOCKET_ID_ANY, 0, 256);
+	memzone_aligned_256 = rte_memzone_reserve_aligned(
+			TEST_MEMZONE_NAME("aligned_256"), 100, SOCKET_ID_ANY, 0,
+			256);
 
 	/* memzone that is supposed to be aligned on a 512 byte boundary */
-	memzone_aligned_512 = rte_memzone_reserve_aligned("aligned_512", 100,
-				SOCKET_ID_ANY, 0, 512);
+	memzone_aligned_512 = rte_memzone_reserve_aligned(
+			TEST_MEMZONE_NAME("aligned_512"), 100, SOCKET_ID_ANY, 0,
+			512);
 
 	/* memzone that is supposed to be aligned on a 1024 byte boundary */
-	memzone_aligned_1024 = rte_memzone_reserve_aligned("aligned_1024", 100,
-				SOCKET_ID_ANY, 0, 1024);
+	memzone_aligned_1024 = rte_memzone_reserve_aligned(
+			TEST_MEMZONE_NAME("aligned_1024"), 100, SOCKET_ID_ANY,
+			0, 1024);
 
 	printf("check alignments and lengths\n");
 	if (memzone_aligned_32 == NULL) {
@@ -736,37 +783,46 @@
 test_memzone_bounded(void)
 {
 	const struct rte_memzone *memzone_err;
-	const char *name;
 	int rc;
 
 	/* should fail as boundary is not power of two */
-	name = "bounded_error_31";
-	if ((memzone_err = rte_memzone_reserve_bounded(name,
-			100, SOCKET_ID_ANY, 0, 32, UINT32_MAX)) != NULL) {
+	memzone_err = rte_memzone_reserve_bounded(
+			TEST_MEMZONE_NAME("bounded_error_31"), 100,
+			SOCKET_ID_ANY, 0, 32, UINT32_MAX);
+	if (memzone_err != NULL) {
 		printf("%s(%s)created a memzone with invalid boundary "
 			"conditions\n", __func__, memzone_err->name);
 		return -1;
 	}
 
 	/* should fail as len is greater then boundary */
-	name = "bounded_error_32";
-	if ((memzone_err = rte_memzone_reserve_bounded(name,
-			100, SOCKET_ID_ANY, 0, 32, 32)) != NULL) {
+	memzone_err = rte_memzone_reserve_bounded(
+			TEST_MEMZONE_NAME("bounded_error_32"), 100,
+			SOCKET_ID_ANY, 0, 32, 32);
+	if (memzone_err != NULL) {
 		printf("%s(%s)created a memzone with invalid boundary "
 			"conditions\n", __func__, memzone_err->name);
 		return -1;
 	}
 
-	if ((rc = check_memzone_bounded("bounded_128", 100, 128, 128)) != 0)
+	rc = check_memzone_bounded(TEST_MEMZONE_NAME("bounded_128"), 100, 128,
+			128);
+	if (rc != 0)
 		return rc;
 
-	if ((rc = check_memzone_bounded("bounded_256", 100, 256, 128)) != 0)
+	rc = check_memzone_bounded(TEST_MEMZONE_NAME("bounded_256"), 100, 256,
+			128);
+	if (rc != 0)
 		return rc;
 
-	if ((rc = check_memzone_bounded("bounded_1K", 100, 64, 1024)) != 0)
+	rc = check_memzone_bounded(TEST_MEMZONE_NAME("bounded_1K"), 100, 64,
+			1024);
+	if (rc != 0)
 		return rc;
 
-	if ((rc = check_memzone_bounded("bounded_1K_MAX", 0, 64, 1024)) != 0)
+	rc = check_memzone_bounded(TEST_MEMZONE_NAME("bounded_1K_MAX"), 0, 64,
+			1024);
+	if (rc != 0)
 		return rc;
 
 	return 0;
@@ -775,29 +831,32 @@
 static int
 test_memzone_free(void)
 {
-	const struct rte_memzone *mz[RTE_MAX_MEMZONE];
+	const struct rte_memzone *mz[RTE_MAX_MEMZONE + 1];
 	int i;
 	char name[20];
 
-	mz[0] = rte_memzone_reserve("tempzone0", 2000, SOCKET_ID_ANY, 0);
-	mz[1] = rte_memzone_reserve("tempzone1", 4000, SOCKET_ID_ANY, 0);
+	mz[0] = rte_memzone_reserve(TEST_MEMZONE_NAME("tempzone0"), 2000,
+			SOCKET_ID_ANY, 0);
+	mz[1] = rte_memzone_reserve(TEST_MEMZONE_NAME("tempzone1"), 4000,
+			SOCKET_ID_ANY, 0);
 
 	if (mz[0] > mz[1])
 		return -1;
-	if (!rte_memzone_lookup("tempzone0"))
+	if (!rte_memzone_lookup(TEST_MEMZONE_NAME("tempzone0")))
 		return -1;
-	if (!rte_memzone_lookup("tempzone1"))
+	if (!rte_memzone_lookup(TEST_MEMZONE_NAME("tempzone1")))
 		return -1;
 
 	if (rte_memzone_free(mz[0])) {
 		printf("Fail memzone free - tempzone0\n");
 		return -1;
 	}
-	if (rte_memzone_lookup("tempzone0")) {
+	if (rte_memzone_lookup(TEST_MEMZONE_NAME("tempzone0"))) {
 		printf("Found previously free memzone - tempzone0\n");
 		return -1;
 	}
-	mz[2] = rte_memzone_reserve("tempzone2", 2000, SOCKET_ID_ANY, 0);
+	mz[2] = rte_memzone_reserve(TEST_MEMZONE_NAME("tempzone2"), 2000,
+			SOCKET_ID_ANY, 0);
 
 	if (mz[2] > mz[1]) {
 		printf("tempzone2 should have gotten the free entry from tempzone0\n");
@@ -807,7 +866,7 @@
 		printf("Fail memzone free - tempzone2\n");
 		return -1;
 	}
-	if (rte_memzone_lookup("tempzone2")) {
+	if (rte_memzone_lookup(TEST_MEMZONE_NAME("tempzone2"))) {
 		printf("Found previously free memzone - tempzone2\n");
 		return -1;
 	}
@@ -815,14 +874,15 @@
 		printf("Fail memzone free - tempzone1\n");
 		return -1;
 	}
-	if (rte_memzone_lookup("tempzone1")) {
+	if (rte_memzone_lookup(TEST_MEMZONE_NAME("tempzone1"))) {
 		printf("Found previously free memzone - tempzone1\n");
 		return -1;
 	}
 
 	i = 0;
 	do {
-		snprintf(name, sizeof(name), "tempzone%u", i);
+		snprintf(name, sizeof(name), TEST_MEMZONE_NAME("tempzone%u"),
+				i);
 		mz[i] = rte_memzone_reserve(name, 1, SOCKET_ID_ANY, 0);
 	} while (mz[i++] != NULL);
 
@@ -830,7 +890,8 @@
 		printf("Fail memzone free - tempzone0\n");
 		return -1;
 	}
-	mz[0] = rte_memzone_reserve("tempzone0new", 0, SOCKET_ID_ANY, 0);
+	mz[0] = rte_memzone_reserve(TEST_MEMZONE_NAME("tempzone0new"), 0,
+			SOCKET_ID_ANY, 0);
 
 	if (mz[0] == NULL) {
 		printf("Fail to create memzone - tempzone0new - when MAX memzones were "
@@ -857,16 +918,16 @@
 	const struct rte_memzone *memzone4;
 	const struct rte_memzone *mz;
 
-	memzone1 = rte_memzone_reserve("testzone1", 100,
+	memzone1 = rte_memzone_reserve(TEST_MEMZONE_NAME("testzone1"), 100,
 				SOCKET_ID_ANY, 0);
 
-	memzone2 = rte_memzone_reserve("testzone2", 1000,
+	memzone2 = rte_memzone_reserve(TEST_MEMZONE_NAME("testzone2"), 1000,
 				0, 0);
 
-	memzone3 = rte_memzone_reserve("testzone3", 1000,
+	memzone3 = rte_memzone_reserve(TEST_MEMZONE_NAME("testzone3"), 1000,
 				1, 0);
 
-	memzone4 = rte_memzone_reserve("testzone4", 1024,
+	memzone4 = rte_memzone_reserve(TEST_MEMZONE_NAME("testzone4"), 1024,
 				SOCKET_ID_ANY, 0);
 
 	/* memzone3 may be NULL if we don't have NUMA */
@@ -918,12 +979,12 @@
 		return -1;
 
 	printf("test zone lookup\n");
-	mz = rte_memzone_lookup("testzone1");
+	mz = rte_memzone_lookup(TEST_MEMZONE_NAME("testzone1"));
 	if (mz != memzone1)
 		return -1;
 
 	printf("test duplcate zone name\n");
-	mz = rte_memzone_reserve("testzone1", 100,
+	mz = rte_memzone_reserve(TEST_MEMZONE_NAME("testzone1"), 100,
 			SOCKET_ID_ANY, 0);
 	if (mz != NULL)
 		return -1;
@@ -948,16 +1009,22 @@
 	return 0;
 }
 
-static int memzone_calk_called;
-static void memzone_walk_clb(const struct rte_memzone *mz __rte_unused,
+static int test_memzones_left;
+static int memzone_walk_cnt;
+static void memzone_walk_clb(const struct rte_memzone *mz,
 			     void *arg __rte_unused)
 {
-	memzone_calk_called = 1;
+	memzone_walk_cnt++;
+	if (!strncmp(TEST_MEMZONE_NAME(""), mz->name, RTE_MEMZONE_NAMESIZE))
+		test_memzones_left++;
 }
 
 static int
 test_memzone(void)
 {
+	/* take note of how many memzones were allocated before running */
+	int memzone_cnt = rte_eal_get_configuration()->mem_config->memzone_cnt;
+
 	printf("test basic memzone API\n");
 	if (test_memzone_basic() < 0)
 		return -1;
@@ -995,8 +1062,10 @@
 		return -1;
 
 	printf("check memzone cleanup\n");
+	memzone_walk_cnt = 0;
+	test_memzones_left = 0;
 	rte_memzone_walk(memzone_walk_clb, NULL);
-	if (memzone_calk_called) {
+	if (memzone_walk_cnt != memzone_cnt || test_memzones_left > 0) {
 		printf("there are some memzones left after test\n");
 		rte_memzone_dump(stdout);
 		return -1;
diff -Nru dpdk-16.11.4/app/test/test_pmd_perf.c dpdk-16.11.6/app/test/test_pmd_perf.c
--- dpdk-16.11.4/app/test/test_pmd_perf.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/app/test/test_pmd_perf.c	2018-04-19 15:01:06.000000000 +0100
@@ -321,10 +321,10 @@
 	return (uint16_t)-1;
 }
 
-volatile uint64_t stop;
-uint64_t count;
-uint64_t drop;
-uint64_t idle;
+static volatile uint64_t stop;
+static uint64_t count;
+static uint64_t drop;
+static uint64_t idle;
 
 static void
 reset_count(void)
@@ -557,7 +557,7 @@
 	return 0;
 }
 
-rte_atomic64_t start;
+static rte_atomic64_t start;
 
 static inline int
 poll_burst(void *args)
diff -Nru dpdk-16.11.4/app/test/test_reorder.c dpdk-16.11.6/app/test/test_reorder.c
--- dpdk-16.11.4/app/test/test_reorder.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/app/test/test_reorder.c	2018-04-19 15:01:06.000000000 +0100
@@ -362,9 +362,20 @@
 	return 0;
 }
 
+static void
+test_teardown(void)
+{
+	rte_reorder_free(test_params->b);
+	test_params->b = NULL;
+	rte_mempool_free(test_params->p);
+	test_params->p = NULL;
+}
+
+
 static struct unit_test_suite reorder_test_suite  = {
 
 	.setup = test_setup,
+	.teardown = test_teardown,
 	.suite_name = "Reorder Unit Test Suite",
 	.unit_test_cases = {
 		TEST_CASE(test_reorder_create),
diff -Nru dpdk-16.11.4/app/test/test_ring_perf.c dpdk-16.11.6/app/test/test_ring_perf.c
--- dpdk-16.11.4/app/test/test_ring_perf.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/app/test/test_ring_perf.c	2018-04-19 15:01:06.000000000 +0100
@@ -60,9 +60,6 @@
  */
 static const volatile unsigned bulk_sizes[] = { 8, 32 };
 
-/* The ring structure used for tests */
-static struct rte_ring *r;
-
 struct lcore_pair {
 	unsigned c1, c2;
 };
@@ -143,7 +140,7 @@
 
 /* Get cycle counts for dequeuing from an empty ring. Should be 2 or 3 cycles */
 static void
-test_empty_dequeue(void)
+test_empty_dequeue(struct rte_ring *r)
 {
 	const unsigned iter_shift = 26;
 	const unsigned iterations = 1<<iter_shift;
@@ -171,6 +168,7 @@
  * and return two. Input = burst size, output = cycle average for sp/sc & mp/mc
  */
 struct thread_params {
+	struct rte_ring *r;
 	unsigned size;        /* input value, the burst size */
 	double spsc, mpmc;    /* output value, the single or multi timings */
 };
@@ -185,6 +183,7 @@
 	const unsigned iter_shift = 23;
 	const unsigned iterations = 1<<iter_shift;
 	struct thread_params *params = p;
+	struct rte_ring *r = params->r;
 	const unsigned size = params->size;
 	unsigned i;
 	void *burst[MAX_BURST] = {0};
@@ -220,6 +219,7 @@
 	const unsigned iter_shift = 23;
 	const unsigned iterations = 1<<iter_shift;
 	struct thread_params *params = p;
+	struct rte_ring *r = params->r;
 	const unsigned size = params->size;
 	unsigned i;
 	void *burst[MAX_BURST] = {0};
@@ -250,7 +250,7 @@
  * used to measure ring perf between hyperthreads, cores and sockets.
  */
 static void
-run_on_core_pair(struct lcore_pair *cores,
+run_on_core_pair(struct lcore_pair *cores, struct rte_ring *r,
 		lcore_function_t f1, lcore_function_t f2)
 {
 	struct thread_params param1 = {0}, param2 = {0};
@@ -258,6 +258,7 @@
 	for (i = 0; i < sizeof(bulk_sizes)/sizeof(bulk_sizes[0]); i++) {
 		lcore_count = 0;
 		param1.size = param2.size = bulk_sizes[i];
+		param1.r = param2.r = r;
 		if (cores->c1 == rte_get_master_lcore()) {
 			rte_eal_remote_launch(f2, &param2, cores->c2);
 			f1(&param1);
@@ -280,7 +281,7 @@
  * takes on a single lcore. Result is for comparison with the bulk enq+deq.
  */
 static void
-test_single_enqueue_dequeue(void)
+test_single_enqueue_dequeue(struct rte_ring *r)
 {
 	const unsigned iter_shift = 24;
 	const unsigned iterations = 1<<iter_shift;
@@ -313,7 +314,7 @@
  * as for the bulk function called on a single lcore.
  */
 static void
-test_burst_enqueue_dequeue(void)
+test_burst_enqueue_dequeue(struct rte_ring *r)
 {
 	const unsigned iter_shift = 23;
 	const unsigned iterations = 1<<iter_shift;
@@ -347,7 +348,7 @@
 
 /* Times enqueue and dequeue on a single lcore */
 static void
-test_bulk_enqueue_dequeue(void)
+test_bulk_enqueue_dequeue(struct rte_ring *r)
 {
 	const unsigned iter_shift = 23;
 	const unsigned iterations = 1<<iter_shift;
@@ -385,32 +386,35 @@
 test_ring_perf(void)
 {
 	struct lcore_pair cores;
+	struct rte_ring *r = NULL;
+
 	r = rte_ring_create(RING_NAME, RING_SIZE, rte_socket_id(), 0);
-	if (r == NULL && (r = rte_ring_lookup(RING_NAME)) == NULL)
+	if (r == NULL)
 		return -1;
 
 	printf("### Testing single element and burst enq/deq ###\n");
-	test_single_enqueue_dequeue();
-	test_burst_enqueue_dequeue();
+	test_single_enqueue_dequeue(r);
+	test_burst_enqueue_dequeue(r);
 
 	printf("\n### Testing empty dequeue ###\n");
-	test_empty_dequeue();
+	test_empty_dequeue(r);
 
 	printf("\n### Testing using a single lcore ###\n");
-	test_bulk_enqueue_dequeue();
+	test_bulk_enqueue_dequeue(r);
 
 	if (get_two_hyperthreads(&cores) == 0) {
 		printf("\n### Testing using two hyperthreads ###\n");
-		run_on_core_pair(&cores, enqueue_bulk, dequeue_bulk);
+		run_on_core_pair(&cores, r, enqueue_bulk, dequeue_bulk);
 	}
 	if (get_two_cores(&cores) == 0) {
 		printf("\n### Testing using two physical cores ###\n");
-		run_on_core_pair(&cores, enqueue_bulk, dequeue_bulk);
+		run_on_core_pair(&cores, r, enqueue_bulk, dequeue_bulk);
 	}
 	if (get_two_sockets(&cores) == 0) {
 		printf("\n### Testing using two NUMA nodes ###\n");
-		run_on_core_pair(&cores, enqueue_bulk, dequeue_bulk);
+		run_on_core_pair(&cores, r, enqueue_bulk, dequeue_bulk);
 	}
+	rte_ring_free(r);
 	return 0;
 }
 
diff -Nru dpdk-16.11.4/app/test/test_table_acl.c dpdk-16.11.6/app/test/test_table_acl.c
--- dpdk-16.11.4/app/test/test_table_acl.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/app/test/test_table_acl.c	2018-04-19 15:01:06.000000000 +0100
@@ -532,6 +532,8 @@
 		struct rte_pipeline_table_entry *table_entries[5];
 		int key_found[5];
 
+		memset(table_entries, 0, sizeof(table_entries));
+
 		for (n = 0; n < 5; n++) {
 			memset(&keys[n], 0, sizeof(struct rte_table_acl_rule_delete_params));
 			key_array[n] = &keys[n];
diff -Nru dpdk-16.11.4/app/test/test_table.c dpdk-16.11.6/app/test/test_table.c
--- dpdk-16.11.4/app/test/test_table.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/app/test/test_table.c	2018-04-19 15:01:06.000000000 +0100
@@ -83,6 +83,14 @@
 }
 
 static void
+app_free_resources(void) {
+	int i;
+	for (i = 0; i < N_PORTS; i++)
+		rte_ring_free(rings_rx[i]);
+	rte_mempool_free(pool);
+}
+
+static void
 app_init_mbuf_pools(void)
 {
 	/* Init the buffer pool */
@@ -141,18 +149,20 @@
 static int
 test_table(void)
 {
-	int status, failures;
+	int status, ret;
 	unsigned i;
 
-	failures = 0;
+	ret = TEST_SUCCESS;
 
 	app_init_rings();
 	app_init_mbuf_pools();
 
 	printf("\n\n\n\n************Pipeline tests************\n");
 
-	if (test_table_pipeline() < 0)
-		return -1;
+	if (test_table_pipeline() < 0) {
+		ret = TEST_FAILED;
+		goto end;
+	}
 
 	printf("\n\n\n\n************Port tests************\n");
 	for (i = 0; i < n_port_tests; i++) {
@@ -160,8 +170,8 @@
 		if (status < 0) {
 			printf("\nPort test number %d failed (%d).\n", i,
 				status);
-			failures++;
-			return -1;
+			ret = TEST_FAILED;
+			goto end;
 		}
 	}
 
@@ -171,8 +181,8 @@
 		if (status < 0) {
 			printf("\nTable test number %d failed (%d).\n", i,
 				status);
-			failures++;
-			return -1;
+			ret = TEST_FAILED;
+			goto end;
 		}
 	}
 
@@ -182,21 +192,23 @@
 		if (status < 0) {
 			printf("\nCombined table test number %d failed with "
 				"reason number %d.\n", i, status);
-			failures++;
-			return -1;
+			ret = TEST_FAILED;
+			goto end;
 		}
 	}
 
-	if (failures)
-		return -1;
-
 #ifdef RTE_LIBRTE_ACL
 	printf("\n\n\n\n************ACL tests************\n");
-	if (test_table_acl() < 0)
-		return -1;
+	if (test_table_acl() < 0) {
+		ret = TEST_FAILED;
+		goto end;
+	}
 #endif
 
-	return 0;
+end:
+	app_free_resources();
+
+	return ret;
 }
 
 REGISTER_TEST_COMMAND(table_autotest, test_table);
diff -Nru dpdk-16.11.4/app/test/test_timer_perf.c dpdk-16.11.6/app/test/test_timer_perf.c
--- dpdk-16.11.4/app/test/test_timer_perf.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/app/test/test_timer_perf.c	2018-04-19 15:01:06.000000000 +0100
@@ -155,6 +155,7 @@
 	printf("Time per rte_timer_manage with zero callbacks: %"PRIu64" cycles\n",
 			(end_tsc - start_tsc + iterations/2) / iterations);
 
+	rte_free(tms);
 	return 0;
 }
 
diff -Nru dpdk-16.11.4/app/test-pmd/cmdline.c dpdk-16.11.6/app/test-pmd/cmdline.c
--- dpdk-16.11.4/app/test-pmd/cmdline.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/app/test-pmd/cmdline.c	2018-04-19 15:01:06.000000000 +0100
@@ -8387,11 +8387,11 @@
 	uint16_t port_dst;
 	cmdline_fixed_string_t verify_tag;
 	uint32_t verify_tag_value;
-	cmdline_ipaddr_t tos;
+	cmdline_fixed_string_t tos;
 	uint8_t tos_value;
-	cmdline_ipaddr_t proto;
+	cmdline_fixed_string_t proto;
 	uint8_t proto_value;
-	cmdline_ipaddr_t ttl;
+	cmdline_fixed_string_t ttl;
 	uint8_t ttl_value;
 	cmdline_fixed_string_t vlan;
 	uint16_t vlan_value;
@@ -8923,7 +8923,7 @@
 		(void *)&cmd_flow_director_flow_type,
 		(void *)&cmd_flow_director_src,
 		(void *)&cmd_flow_director_ip_src,
-		(void *)&cmd_flow_director_port_dst,
+		(void *)&cmd_flow_director_port_src,
 		(void *)&cmd_flow_director_dst,
 		(void *)&cmd_flow_director_ip_dst,
 		(void *)&cmd_flow_director_port_dst,
diff -Nru dpdk-16.11.4/app/test-pmd/config.c dpdk-16.11.6/app/test-pmd/config.c
--- dpdk-16.11.4/app/test-pmd/config.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/app/test-pmd/config.c	2018-04-19 15:01:06.000000000 +0100
@@ -1155,6 +1155,36 @@
 	}
 }
 
+static portid_t
+fwd_topology_tx_port_get(portid_t rxp)
+{
+	static int warning_once = 1;
+
+	RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports);
+
+	switch (port_topology) {
+	default:
+	case PORT_TOPOLOGY_PAIRED:
+		if ((rxp & 0x1) == 0) {
+			if (rxp + 1 < cur_fwd_config.nb_fwd_ports)
+				return rxp + 1;
+			if (warning_once) {
+				printf("\nWarning! port-topology=paired"
+				       " and odd forward ports number,"
+				       " the last port will pair with"
+				       " itself.\n\n");
+				warning_once = 0;
+			}
+			return rxp;
+		}
+		return rxp - 1;
+	case PORT_TOPOLOGY_CHAINED:
+		return (rxp + 1) % cur_fwd_config.nb_fwd_ports;
+	case PORT_TOPOLOGY_LOOP:
+		return rxp;
+	}
+}
+
 static void
 simple_fwd_config_setup(void)
 {
@@ -1217,11 +1247,6 @@
  * For the RSS forwarding test all streams distributed over lcores. Each stream
  * being composed of a RX queue to poll on a RX port for input messages,
  * associated with a TX queue of a TX port where to send forwarded packets.
- * All packets received on the RX queue of index "RxQj" of the RX port "RxPi"
- * are sent on the TX queue "TxQl" of the TX port "TxPk" according to the two
- * following rules:
- *    - TxPk = (RxPi + 1) if RxPi is even, (RxPi - 1) if RxPi is odd
- *    - TxQl = RxQj
  */
 static void
 rss_fwd_config_setup(void)
@@ -1253,18 +1278,7 @@
 		struct fwd_stream *fs;
 
 		fs = fwd_streams[sm_id];
-
-		if ((rxp & 0x1) == 0)
-			txp = (portid_t) (rxp + 1);
-		else
-			txp = (portid_t) (rxp - 1);
-		/*
-		 * if we are in loopback, simply send stuff out through the
-		 * ingress port
-		 */
-		if (port_topology == PORT_TOPOLOGY_LOOP)
-			txp = rxp;
-
+		txp = fwd_topology_tx_port_get(rxp);
 		fs->rx_port = fwd_ports_ids[rxp];
 		fs->rx_queue = rxq;
 		fs->tx_port = fwd_ports_ids[txp];
@@ -1279,11 +1293,7 @@
 		 * Restart from RX queue 0 on next RX port
 		 */
 		rxq = 0;
-		if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
-			rxp = (portid_t)
-				(rxp + ((nb_ports >> 1) / nb_fwd_ports));
-		else
-			rxp = (portid_t) (rxp + 1);
+		rxp++;
 	}
 }
 
diff -Nru dpdk-16.11.4/app/test-pmd/txonly.c dpdk-16.11.6/app/test-pmd/txonly.c
--- dpdk-16.11.4/app/test-pmd/txonly.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/app/test-pmd/txonly.c	2018-04-19 15:01:06.000000000 +0100
@@ -106,6 +106,7 @@
 		buf = ((char*) buf + copy_len);
 		seg = seg->next;
 		seg_buf = rte_pktmbuf_mtod(seg, char *);
+		copy_len = seg->data_len;
 	}
 	rte_memcpy(seg_buf, buf, (size_t) len);
 }
diff -Nru dpdk-16.11.4/buildtools/pmdinfogen/pmdinfogen.c dpdk-16.11.6/buildtools/pmdinfogen/pmdinfogen.c
--- dpdk-16.11.4/buildtools/pmdinfogen/pmdinfogen.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/buildtools/pmdinfogen/pmdinfogen.c	2018-04-19 15:01:06.000000000 +0100
@@ -158,7 +158,8 @@
 		 * There are more than 64k sections,
 		 * read count from .sh_size.
 		 */
-		info->num_sections = TO_NATIVE(endian, 32, sechdrs[0].sh_size);
+		info->num_sections =
+			TO_NATIVE(endian, ADDR_SIZE, sechdrs[0].sh_size);
 	} else {
 		info->num_sections = hdr->e_shnum;
 	}
@@ -181,7 +182,7 @@
 		sechdrs[i].sh_offset    =
 			TO_NATIVE(endian, ADDR_SIZE, sechdrs[i].sh_offset);
 		sechdrs[i].sh_size      =
-			TO_NATIVE(endian, 32, sechdrs[i].sh_size);
+			TO_NATIVE(endian, ADDR_SIZE, sechdrs[i].sh_size);
 		sechdrs[i].sh_link      =
 			TO_NATIVE(endian, 32, sechdrs[i].sh_link);
 		sechdrs[i].sh_info      =
diff -Nru dpdk-16.11.4/config/common_base dpdk-16.11.6/config/common_base
--- dpdk-16.11.4/config/common_base	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/config/common_base	2018-04-19 15:01:06.000000000 +0100
@@ -584,6 +584,11 @@
 CONFIG_RTE_APP_TEST_RESOURCE_TAR=n
 
 #
+# Compile the procinfo application
+#
+CONFIG_RTE_PROC_INFO=n
+
+#
 # Compile the PMD test application
 #
 CONFIG_RTE_TEST_PMD=y
diff -Nru dpdk-16.11.4/config/common_linuxapp dpdk-16.11.6/config/common_linuxapp
--- dpdk-16.11.4/config/common_linuxapp	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/config/common_linuxapp	2018-04-19 15:01:06.000000000 +0100
@@ -44,3 +44,4 @@
 CONFIG_RTE_LIBRTE_PMD_AF_PACKET=y
 CONFIG_RTE_LIBRTE_POWER=y
 CONFIG_RTE_VIRTIO_USER=y
+CONFIG_RTE_PROC_INFO=y
diff -Nru dpdk-16.11.4/debian/changelog dpdk-16.11.6/debian/changelog
--- dpdk-16.11.4/debian/changelog	2018-02-11 18:33:18.000000000 +0000
+++ dpdk-16.11.6/debian/changelog	2018-04-23 14:33:27.000000000 +0100
@@ -1,3 +1,14 @@
+dpdk (16.11.6-1+deb9u1) stretch; urgency=medium
+
+  * Merge stable update to 16.11.5; For a list of changes see
+    http://dpdk.org/ml/archives/announce/2018-March/000180.html
+  * Merge stable update to 16.11.6; For a list of changes see
+    https://dpdk.org/doc/guides-16.11/rel_notes/release_16_11.html
+    * Fixes CVE-2018-1059 (Closes: #896688).
+  * Drop patches merged upstream in 16.11.6.
+
+ -- Luca Boccassi <bluca@debian.org>  Mon, 23 Apr 2018 14:33:27 +0100
+
 dpdk (16.11.4-1+deb9u1) stretch; urgency=medium
 
   [ Luca Boccassi ]
diff -Nru dpdk-16.11.4/debian/patches/dpdk-dev-v3-eal-sPAPR-IOMMU-support-in-pci-probing-for-vfio-pci-in-ppc64le.patch dpdk-16.11.6/debian/patches/dpdk-dev-v3-eal-sPAPR-IOMMU-support-in-pci-probing-for-vfio-pci-in-ppc64le.patch
--- dpdk-16.11.4/debian/patches/dpdk-dev-v3-eal-sPAPR-IOMMU-support-in-pci-probing-for-vfio-pci-in-ppc64le.patch	2018-02-11 18:33:18.000000000 +0000
+++ dpdk-16.11.6/debian/patches/dpdk-dev-v3-eal-sPAPR-IOMMU-support-in-pci-probing-for-vfio-pci-in-ppc64le.patch	1970-01-01 01:00:00.000000000 +0100
@@ -1,165 +0,0 @@
-Description: eal: sPAPR IOMMU support in pci probing for vfio-pci in ppc64le
-
-    From: Gowrishankar Muthukrishnan <gowrishankar.m@linux.vnet.ibm.com>
-
-    Below changes adds pci probing support for vfio-pci devices in power8.
-
-    v3 - better validation for kernel not implementing few iocts called
-    v2 - kernel version checked and doc updated
-
-    Signed-off-by: Gowrishankar Muthukrishnan <gowrishankar.m@linux.vnet.ibm.com>
-
-Note: can be dropped >=DPDK 17.05
-
-Forwarded: yes, http://dpdk.org/dev/patchwork/patch/21482/
-Original-Author: Gowrishankar Muthukrishnan <gowrishankar.m@linux.vnet.ibm.com>
-Bug-Ubuntu: https://bugs.launchpad.net/ubuntu/+source/dpdk/+bug/1670689
-Author: Christian Ehrhardt <christian.ehrhardt@canonical.com>
-Last-Update: 2017-03-07
-
---- a/lib/librte_eal/linuxapp/eal/eal_vfio.c
-+++ b/lib/librte_eal/linuxapp/eal/eal_vfio.c
-@@ -50,12 +50,15 @@
- static struct vfio_config vfio_cfg;
- 
- static int vfio_type1_dma_map(int);
-+static int vfio_spapr_dma_map(int);
- static int vfio_noiommu_dma_map(int);
- 
- /* IOMMU types we support */
- static const struct vfio_iommu_type iommu_types[] = {
- 	/* x86 IOMMU, otherwise known as type 1 */
- 	{ RTE_VFIO_TYPE1, "Type 1", &vfio_type1_dma_map},
-+	/* ppc64 IOMMU, otherwise known as spapr */
-+	{ RTE_VFIO_SPAPR, "sPAPR", &vfio_spapr_dma_map},
- 	/* IOMMU-less mode */
- 	{ RTE_VFIO_NOIOMMU, "No-IOMMU", &vfio_noiommu_dma_map},
- };
-@@ -537,6 +540,93 @@
- 	}
- 
- 	return 0;
-+}
-+
-+static int
-+vfio_spapr_dma_map(int vfio_container_fd)
-+{
-+	const struct rte_memseg *ms = rte_eal_get_physmem_layout();
-+	int i, ret;
-+
-+	struct vfio_iommu_spapr_register_memory reg = {
-+		.argsz = sizeof(reg),
-+		.flags = 0
-+	};
-+	struct vfio_iommu_spapr_tce_info info = {
-+		.argsz = sizeof(info),
-+	};
-+	struct vfio_iommu_spapr_tce_create create = {
-+		.argsz = sizeof(create),
-+	};
-+	struct vfio_iommu_spapr_tce_remove remove = {
-+		.argsz = sizeof(remove),
-+	};
-+
-+	/* query spapr iommu info */
-+	ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
-+	if (ret) {
-+		RTE_LOG(ERR, EAL, "  cannot get iommu info, "
-+				"error %i (%s)\n", errno, strerror(errno));
-+		return -1;
-+	}
-+
-+	/* remove default DMA of 32 bit window */
-+	remove.start_addr = info.dma32_window_start;
-+	ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_REMOVE, &remove);
-+	if (ret) {
-+		RTE_LOG(ERR, EAL, "  cannot remove default DMA window, "
-+				"error %i (%s)\n", errno, strerror(errno));
-+		return -1;
-+	}
-+
-+	/* calculate window size based on number of hugepages configured */
-+	create.window_size = rte_eal_get_physmem_size();
-+	create.page_shift = __builtin_ctzll(ms->hugepage_sz);
-+	create.levels = 2;
-+
-+	ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_CREATE, &create);
-+	if (ret) {
-+		RTE_LOG(ERR, EAL, "  cannot create new DMA window, "
-+				"error %i (%s)\n", errno, strerror(errno));
-+		return -1;
-+	}
-+
-+	/* map all DPDK segments for DMA. use 1:1 PA to IOVA mapping */
-+	for (i = 0; i < RTE_MAX_MEMSEG; i++) {
-+		struct vfio_iommu_type1_dma_map dma_map;
-+
-+		if (ms[i].addr == NULL)
-+			break;
-+
-+		reg.vaddr = (uintptr_t) ms[i].addr;
-+		reg.size = ms[i].len;
-+		ret = ioctl(vfio_container_fd,
-+			VFIO_IOMMU_SPAPR_REGISTER_MEMORY, &reg);
-+		if (ret) {
-+			RTE_LOG(ERR, EAL, "  cannot register vaddr for IOMMU, "
-+				"error %i (%s)\n", errno, strerror(errno));
-+			return -1;
-+		}
-+
-+		memset(&dma_map, 0, sizeof(dma_map));
-+		dma_map.argsz = sizeof(struct vfio_iommu_type1_dma_map);
-+		dma_map.vaddr = ms[i].addr_64;
-+		dma_map.size = ms[i].len;
-+		dma_map.iova = ms[i].phys_addr;
-+		dma_map.flags = VFIO_DMA_MAP_FLAG_READ |
-+				 VFIO_DMA_MAP_FLAG_WRITE;
-+
-+		ret = ioctl(vfio_container_fd, VFIO_IOMMU_MAP_DMA, &dma_map);
-+
-+		if (ret) {
-+			RTE_LOG(ERR, EAL, "  cannot set up DMA remapping, "
-+				"error %i (%s)\n", errno, strerror(errno));
-+			return -1;
-+		}
-+
-+	}
-+
-+	return 0;
- }
- 
- static int
---- a/lib/librte_eal/linuxapp/eal/eal_vfio.h
-+++ b/lib/librte_eal/linuxapp/eal/eal_vfio.h
-@@ -54,6 +54,31 @@
- 
- #define RTE_VFIO_TYPE1 VFIO_TYPE1_IOMMU
- 
-+#ifndef VFIO_SPAPR_TCE_v2_IOMMU
-+#define RTE_VFIO_SPAPR 7
-+#define VFIO_IOMMU_SPAPR_REGISTER_MEMORY _IO(VFIO_TYPE, VFIO_BASE + 17)
-+#define VFIO_IOMMU_SPAPR_TCE_CREATE _IO(VFIO_TYPE, VFIO_BASE + 19)
-+#define VFIO_IOMMU_SPAPR_TCE_REMOVE _IO(VFIO_TYPE, VFIO_BASE + 20)
-+struct vfio_iommu_spapr_register_memory {
-+	uint32_t argsz;
-+	uint32_t flags;
-+	uint64_t vaddr;
-+	uint64_t size;
-+};
-+struct vfio_iommu_spapr_tce_create {
-+	uint32_t argsz;
-+	uint32_t page_shift;
-+	uint64_t window_size;
-+	uint32_t levels;
-+};
-+struct vfio_iommu_spapr_tce_remove {
-+	uint32_t argsz;
-+	uint64_t start_addr;
-+};
-+#else
-+#define RTE_VFIO_SPAPR VFIO_SPAPR_TCE_v2_IOMMU
-+#endif
-+
- #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)
- #define RTE_VFIO_NOIOMMU 8
- #else
diff -Nru dpdk-16.11.4/debian/patches/igb_uio-switch-to-new-irq-function-for-MSI-X.patch dpdk-16.11.6/debian/patches/igb_uio-switch-to-new-irq-function-for-MSI-X.patch
--- dpdk-16.11.4/debian/patches/igb_uio-switch-to-new-irq-function-for-MSI-X.patch	2018-02-11 18:33:18.000000000 +0000
+++ dpdk-16.11.6/debian/patches/igb_uio-switch-to-new-irq-function-for-MSI-X.patch	1970-01-01 01:00:00.000000000 +0100
@@ -1,74 +0,0 @@
-Description: igb_uio: switch to new irq function for MSI-X
-
-From 99bb58f3adc73046b538874a0944578146ee1189 Mon Sep 17 00:00:00 2001
-From: Nicolas Dichtel <nicolas.dichtel@6wind.com>
-Date: Thu, 20 Apr 2017 15:11:23 +0200
-Subject: [PATCH] igb_uio: switch to new irq function for MSI-X
-
-pci_enable_msix() will be removed in kernel 4.12.
-The new API pci_alloc_irq_vectors() is available
-since linux 4.8, thus let's use it.
-
-Link: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=aff171641d18
-Link: https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git/commit/?id=4244de1c64de
-
-Signed-off-by: Nicolas Dichtel <nicolas.dichtel@6wind.com>
-Reviewed-by: David Marchand <david.marchand@6wind.com>
-Acked-by: Ferruh Yigit <ferruh.yigit@intel.com>
----
- lib/librte_eal/linuxapp/igb_uio/compat.h  |  4 ++++
- lib/librte_eal/linuxapp/igb_uio/igb_uio.c | 11 +++++++++++
- 2 files changed, 15 insertions(+)
-
-Forwarded: no (backport)
-Author: Christian Ehrhardt <christian.ehrhardt@canonical.com>
-Original-Author: Nicolas Dichtel <nicolas.dichtel@6wind.com>
-Origin: http://dpdk.org/browse/dpdk/commit/?id=99bb58f3adc73046b538874a0944578146ee1189
-Bug-Ubuntu: https://bugs.launchpad.net/bugs/1700768
-Patch-Name: debian/patches/igb_uio-switch-to-new-irq-function-for-MSI-X.patch
-Last-Update: 2017-06-27
---- a/lib/librte_eal/linuxapp/igb_uio/compat.h
-+++ b/lib/librte_eal/linuxapp/igb_uio/compat.h
-@@ -123,3 +123,7 @@
- }
- 
- #endif /* < 3.3.0 */
-+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)
-+#define HAVE_PCI_ENABLE_MSIX
-+#endif
---- a/lib/librte_eal/linuxapp/igb_uio/igb_uio.c
-+++ b/lib/librte_eal/linuxapp/igb_uio/igb_uio.c
-@@ -325,7 +325,9 @@
- igbuio_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
- {
- 	struct rte_uio_pci_dev *udev;
-+#ifdef HAVE_PCI_ENABLE_MSIX
- 	struct msix_entry msix_entry;
-+#endif
- 	int err;
- 
- 	udev = kzalloc(sizeof(struct rte_uio_pci_dev), GFP_KERNEL);
-@@ -379,6 +381,7 @@
- 	switch (igbuio_intr_mode_preferred) {
- 	case RTE_INTR_MODE_MSIX:
- 		/* Only 1 msi-x vector needed */
-+#ifdef HAVE_PCI_ENABLE_MSIX
- 		msix_entry.entry = 0;
- 		if (pci_enable_msix(dev, &msix_entry, 1) == 0) {
- 			dev_dbg(&dev->dev, "using MSI-X");
-@@ -386,6 +389,14 @@
- 			udev->mode = RTE_INTR_MODE_MSIX;
- 			break;
- 		}
-+#else
-+		if (pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_MSIX) == 1) {
-+			dev_dbg(&dev->dev, "using MSI-X");
-+			udev->info.irq = pci_irq_vector(dev, 0);
-+			udev->mode = RTE_INTR_MODE_MSIX;
-+			break;
-+		}
-+#endif
- 		/* fall back to INTX */
- 	case RTE_INTR_MODE_LEGACY:
- 		if (pci_intx_mask_supported(dev)) {
diff -Nru dpdk-16.11.4/debian/patches/series dpdk-16.11.6/debian/patches/series
--- dpdk-16.11.4/debian/patches/series	2018-02-11 18:33:18.000000000 +0000
+++ dpdk-16.11.6/debian/patches/series	2018-04-23 14:30:59.000000000 +0100
@@ -1,4 +1,2 @@
 fix-vhost-user-socket-permission.patch
 fix-power-default-config.patch
-dpdk-dev-v3-eal-sPAPR-IOMMU-support-in-pci-probing-for-vfio-pci-in-ppc64le.patch
-igb_uio-switch-to-new-irq-function-for-MSI-X.patch
diff -Nru dpdk-16.11.4/doc/guides/cryptodevs/aesni_mb.rst dpdk-16.11.6/doc/guides/cryptodevs/aesni_mb.rst
--- dpdk-16.11.4/doc/guides/cryptodevs/aesni_mb.rst	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/doc/guides/cryptodevs/aesni_mb.rst	2018-04-19 15:01:06.000000000 +0100
@@ -34,7 +34,7 @@
 The AESNI MB PMD (**librte_pmd_aesni_mb**) provides poll mode crypto driver
 support for utilizing Intel multi buffer library, see the white paper
 `Fast Multi-buffer IPsec Implementations on Intel® Architecture Processors
-<https://www-ssl.intel.com/content/www/us/en/intelligent-systems/intel-technology/fast-multi-buffer-ipsec-implementations-ia-processors-paper.html?wapkw=multi+buffer>`_.
+<https://www.intel.com/content/dam/www/public/us/en/documents/white-papers/fast-multi-buffer-ipsec-implementations-ia-processors-paper.pdf>`_.
 
 The AES-NI MB PMD has current only been tested on Fedora 21 64-bit with gcc.
 
diff -Nru dpdk-16.11.4/doc/guides/nics/features/i40e.ini dpdk-16.11.6/doc/guides/nics/features/i40e.ini
--- dpdk-16.11.4/doc/guides/nics/features/i40e.ini	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/doc/guides/nics/features/i40e.ini	2018-04-19 15:01:06.000000000 +0100
@@ -46,3 +46,4 @@
 x86-32               = Y
 x86-64               = Y
 ARMv8                = Y
+Power8               = Y
diff -Nru dpdk-16.11.4/doc/guides/nics/features/i40e_vec.ini dpdk-16.11.6/doc/guides/nics/features/i40e_vec.ini
--- dpdk-16.11.4/doc/guides/nics/features/i40e_vec.ini	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/doc/guides/nics/features/i40e_vec.ini	2018-04-19 15:01:06.000000000 +0100
@@ -38,3 +38,4 @@
 x86-32               = Y
 x86-64               = Y
 ARMv8                = Y
+Power8               = Y
diff -Nru dpdk-16.11.4/doc/guides/nics/i40e.rst dpdk-16.11.6/doc/guides/nics/i40e.rst
--- dpdk-16.11.4/doc/guides/nics/i40e.rst	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/doc/guides/nics/i40e.rst	2018-04-19 15:01:06.000000000 +0100
@@ -130,6 +130,21 @@
   Interrupt Throttling interval.
 
 
+Runtime Config Options
+~~~~~~~~~~~~~~~~~~~~~~
+
+- ``Support multiple driver`` (default ``disable``)
+
+  There was a multiple driver support issue during use of 700 series Ethernet
+  Adapter with both Linux kernel and DPDK PMD. To fix this issue, ``devargs``
+  parameter ``support-multi-driver`` is introduced, for example::
+
+    -w 84:00.0,support-multi-driver=1
+
+  With the above configuration, DPDK PMD will not change global registers, and
+  will switch PF interrupt from IntN to Int0 to avoid interrupt conflict between
+  DPDK and Linux Kernel.
+
 Driver Compilation
 ~~~~~~~~~~~~~~~~~~
 
@@ -459,3 +474,15 @@
 
 Due to the FW limitation, PF can receive packets with Ethertype 0x88A8
 only when floating VEB is disabled.
+
+Global configuration warning
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+I40E PMD will set some global registers to enable some function or set some
+configure. Then when using different ports of the same NIC with Linux kernel
+and DPDK, the port with Linux kernel will be impacted by the port with DPDK.
+For example, register I40E_GL_SWT_L2TAGCTRL is used to control L2 tag, i40e
+PMD uses I40E_GL_SWT_L2TAGCTRL to set vlan TPID. If setting TPID in port A
+with DPDK, then the configuration will also impact port B in the NIC with
+kernel driver, which don't want to use the TPID.
+So PMD reports warning to clarify what is changed by writing global register.
diff -Nru dpdk-16.11.4/doc/guides/rel_notes/release_16_11.rst dpdk-16.11.6/doc/guides/rel_notes/release_16_11.rst
--- dpdk-16.11.4/doc/guides/rel_notes/release_16_11.rst	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/doc/guides/rel_notes/release_16_11.rst	2018-04-19 15:01:06.000000000 +0100
@@ -1028,3 +1028,146 @@
 * uio: fix compilation with -Og
 * usertools: fix device binding with python 3
 * vfio: fix close unchecked file descriptor
+
+16.11.5
+~~~~~~~
+
+* app/procinfo: add compilation option in config
+* app/testpmd: fix crash of txonly with multiple segments
+* app/testpmd: fix flow director filter
+* app/testpmd: fix port index in RSS forward config
+* app/testpmd: fix port topology in RSS forward config
+* bus/pci: fix interrupt handler type
+* contigmem: fix build on FreeBSD 12
+* crypto/qat: fix allocation check and leak
+* crypto/qat: fix null auth algo overwrite
+* doc: fix outdated link to IPsec white paper
+* eal/ppc: remove the braces in memory barrier macros
+* eal/ppc: support sPAPR IOMMU for vfio-pci
+* eal: update assertion macro
+* eal/x86: use lock-prefixed instructions for SMP barrier
+* ethdev: fix data alignment
+* ethdev: fix link autonegotiation value
+* ethdev: fix missing imissed counter in xstats
+* ethdev: fix typo in functions comment
+* examples/bond: check mbuf allocation
+* examples/exception_path: align stats on cache line
+* examples/ip_pipeline: fix timer period unit
+* examples/ipsec-secgw: fix corner case for SPI value
+* examples/l3fwd-power: fix frequency detection
+* examples/l3fwd-power: fix Rx without interrupt
+* examples/vhost: fix sending ARP packet to self
+* examples/vhost: fix startup check
+* igb_uio: fix IRQ disable on recent kernels
+* igb_uio: fix MSI-X IRQ assignment with new IRQ function
+* igb_uio: switch to new irq function for MSI-X
+* keepalive: fix state alignment
+* kni: fix build with kernel 4.15
+* lpm: fix ARM big endian build
+* malloc: fix end for bounded elements
+* malloc: protect stats with lock
+* mbuf: cleanup function to get last segment
+* mbuf: fix NULL freeing when debug enabled
+* mem: fix mmap error check on huge page attach
+* memzone: fix leak on allocation error
+* mk: fix external build
+* mk: support renamed Makefile in external project
+* net/bnxt: fix broadcast cofiguration
+* net/bnxt: fix group info usage
+* net/bnxt: fix headroom initialization
+* net/bnxt: fix link speed setting with autoneg off
+* net/bnxt: fix Rx checksum flags
+* net/bnxt: fix size of Tx ring in HW
+* net/bnxt: parse checksum offload flags
+* net/bnxt: support new PCI IDs
+* net/bonding: check error of MAC address setting
+* net/bonding: fix activated slave in 8023ad mode
+* net/bonding: fix setting slave MAC addresses
+* net/e1000: fix mailbox interrupt handler
+* net/e1000: fix VF Rx interrupt enabling
+* net/ena: do not set Tx L4 offloads in Rx path
+* net/enic: fix crash due to static max number of queues
+* net/fm10k: fix logical port delete
+* net/i40e: add debug logs when writing global registers
+* net/i40e: add warnings when writing global registers
+* net/i40e/base: fix compile issue for GCC 6.3
+* net/i40e/base: fix link LED blink
+* net/i40e/base: fix NVM lock
+* net/i40e: check multi-driver option parsing
+* net/i40e: fix ARM big endian build
+* net/i40e: fix flag for MAC address write
+* net/i40e: fix flow director Rx resource defect
+* net/i40e: fix interrupt conflict when using multi-driver
+* net/i40e: fix multiple driver support issue
+* net/i40e: fix Rx interrupt
+* net/i40e: fix VF reset stats crash
+* net/i40e: fix VF Rx interrupt enabling
+* net/i40e: fix VLAN offload setting
+* net/i40e: fix VSI MAC filter on primary address change
+* net/i40e: implement vector PMD for altivec
+* net/igb: fix Tx queue number assignment
+* net/ixgbe/base: add media type of fixed fiber
+* net/ixgbe: fix ARM big endian build
+* net/ixgbe: fix mailbox interrupt handler
+* net/ixgbe: fix max queue number for VF
+* net/ixgbe: fix reset error handling
+* net/ixgbe: fix the failure of number of Tx queue check
+* net/ixgbe: fix VF Rx interrupt enabling
+* net/ixgbe: improve link state check on VF
+* net/mlx5: fix deadlock of link status alarm
+* net/mlx5: fix missing RSS capability
+* net/mlx5: fix MTU update
+* net/nfp: fix CRC strip check behaviour
+* net/nfp: fix jumbo settings
+* net/nfp: fix MTU settings
+* net/pcap: fix the NUMA id display in logs
+* net/qede/base: fix VF LRO tunnel configuration
+* net/qede: fix clearing of queue stats
+* net/qede: fix few log messages
+* net/qede: fix MTU set and max Rx pkt len usage
+* net/qede: fix to reject config with no Rx queue
+* net/szedata2: fix check of mmap return value
+* net/thunderx: fix multi segment Tx function return
+* net/vhost: fix log messages on create/destroy
+* net/virtio: fix incorrect cast
+* net/virtio: fix mbuf data offset for simple Rx
+* net/virtio: fix memory leak when reinitializing device
+* net/virtio: fix queue flushing with vector Rx enabled
+* net/virtio: fix resuming port with Rx vector path
+* net/virtio: fix Rx and Tx handler selection for ARM32
+* net/virtio: fix typo in function name
+* net/virtio: fix vector Rx flushing
+* net/virtio-user: fix start with kernel vhost
+* pdump: fix error check when creating/canceling thread
+* pmdinfogen: fix cross compilation for ARM big endian
+* test/crypto: fix missing include
+* test/memzone: fix freeing test
+* test/memzone: fix NULL freeing
+* test/memzone: fix wrong test
+* test/memzone: handle previously allocated memzones
+* test/pmd_perf: declare variables as static
+* test: register test as failed if setup failed
+* test/reorder: fix memory leak
+* test/ring_perf: fix memory leak
+* test/table: fix memory leak
+* test/table: fix uninitialized parameter
+* test/timer_perf: fix memory leak
+* usertools/devbind: remove unused function
+* vfio: fix enabled check on error
+* vhost: do not take lock on owner reset
+* vhost: fix crash
+* vhost: fix dequeue zero copy with virtio1
+* vhost: fix error code check when creating thread
+* vhost: fix mbuf free
+* vhost: protect active rings from async ring changes
+
+16.11.6
+~~~~~~~
+
+* vhost: add support for non-contiguous indirect descs tables (fixes CVE-2018-1059)
+* vhost: check all range is mapped when translating GPAs (fixes CVE-2018-1059)
+* vhost: ensure all range is mapped when translating QVAs (fixes CVE-2018-1059)
+* vhost: handle virtually non-contiguous buffers in Rx (fixes CVE-2018-1059)
+* vhost: handle virtually non-contiguous buffers in Rx-mrg (fixes CVE-2018-1059)
+* vhost: handle virtually non-contiguous buffers in Tx (fixes CVE-2018-1059)
+* vhost-user: fix deadlock in case of NUMA realloc
diff -Nru dpdk-16.11.4/doc/guides/sample_app_ug/keep_alive.rst dpdk-16.11.6/doc/guides/sample_app_ug/keep_alive.rst
--- dpdk-16.11.4/doc/guides/sample_app_ug/keep_alive.rst	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/doc/guides/sample_app_ug/keep_alive.rst	2018-04-19 15:01:06.000000000 +0100
@@ -186,5 +186,5 @@
     static inline void
     rte_keepalive_mark_alive(struct rte_keepalive *keepcfg)
     {
-        keepcfg->state_flags[rte_lcore_id()] = ALIVE;
+        keepcfg->live_data[rte_lcore_id()].core_state = RTE_KA_STATE_ALIVE;
     }
diff -Nru dpdk-16.11.4/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c dpdk-16.11.6/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
--- dpdk-16.11.4/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c	2018-04-19 15:01:06.000000000 +0100
@@ -334,6 +334,11 @@
 
 		in = rte_zmalloc("working mem for key",
 				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
+		if (in == NULL) {
+			PMD_DRV_LOG(ERR, "Failed to alloc memory");
+			return -ENOMEM;
+		}
+
 		rte_memcpy(in, qat_aes_xcbc_key_seed,
 				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
 		for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
@@ -364,6 +369,11 @@
 				ICP_QAT_HW_GALOIS_E_CTR0_SZ);
 		in = rte_zmalloc("working mem for key",
 				ICP_QAT_HW_GALOIS_H_SZ, 16);
+		if (in == NULL) {
+			PMD_DRV_LOG(ERR, "Failed to alloc memory");
+			return -ENOMEM;
+		}
+
 		memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
 		if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
 			&enc_key) != 0) {
diff -Nru dpdk-16.11.4/drivers/crypto/qat/qat_crypto.c dpdk-16.11.6/drivers/crypto/qat/qat_crypto.c
--- dpdk-16.11.4/drivers/crypto/qat/qat_crypto.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/crypto/qat/qat_crypto.c	2018-04-19 15:01:06.000000000 +0100
@@ -1085,8 +1085,9 @@
 			ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
 					qat_req->comn_hdr.serv_specif_flags,
 					ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
-			auth_param->auth_res_addr =
-					op->sym->auth.digest.phys_addr;
+			if (likely(ctx->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_NULL))
+				auth_param->auth_res_addr =
+						op->sym->auth.digest.phys_addr;
 			digest_appended = 0;
 		}
 
diff -Nru dpdk-16.11.4/drivers/net/af_packet/rte_eth_af_packet.c dpdk-16.11.6/drivers/net/af_packet/rte_eth_af_packet.c
--- dpdk-16.11.4/drivers/net/af_packet/rte_eth_af_packet.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/af_packet/rte_eth_af_packet.c	2018-04-19 15:01:06.000000000 +0100
@@ -121,7 +121,7 @@
 	.link_speed = ETH_SPEED_NUM_10G,
 	.link_duplex = ETH_LINK_FULL_DUPLEX,
 	.link_status = ETH_LINK_DOWN,
-	.link_autoneg = ETH_LINK_SPEED_AUTONEG
+	.link_autoneg = ETH_LINK_AUTONEG
 };
 
 static uint16_t
diff -Nru dpdk-16.11.4/drivers/net/bnxt/bnxt_ethdev.c dpdk-16.11.6/drivers/net/bnxt/bnxt_ethdev.c
--- dpdk-16.11.4/drivers/net/bnxt/bnxt_ethdev.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/bnxt/bnxt_ethdev.c	2018-04-19 15:01:06.000000000 +0100
@@ -63,19 +63,34 @@
 #define BROADCOM_DEV_ID_57302 0x16c9
 #define BROADCOM_DEV_ID_57304_PF 0x16ca
 #define BROADCOM_DEV_ID_57304_VF 0x16cb
+#define BROADCOM_DEV_ID_57417_MF 0x16cc
 #define BROADCOM_DEV_ID_NS2 0x16cd
+#define BROADCOM_DEV_ID_57311 0x16ce
+#define BROADCOM_DEV_ID_57312 0x16cf
 #define BROADCOM_DEV_ID_57402 0x16d0
 #define BROADCOM_DEV_ID_57404 0x16d1
 #define BROADCOM_DEV_ID_57406_PF 0x16d2
 #define BROADCOM_DEV_ID_57406_VF 0x16d3
 #define BROADCOM_DEV_ID_57402_MF 0x16d4
 #define BROADCOM_DEV_ID_57407_RJ45 0x16d5
+#define BROADCOM_DEV_ID_57412 0x16d6
+#define BROADCOM_DEV_ID_57414 0x16d7
+#define BROADCOM_DEV_ID_57416_RJ45 0x16d8
+#define BROADCOM_DEV_ID_57417_RJ45 0x16d9
 #define BROADCOM_DEV_ID_5741X_VF 0x16dc
+#define BROADCOM_DEV_ID_57412_MF 0x16de
+#define BROADCOM_DEV_ID_57314 0x16df
+#define BROADCOM_DEV_ID_57317_RJ45 0x16e0
 #define BROADCOM_DEV_ID_5731X_VF 0x16e1
+#define BROADCOM_DEV_ID_57417_SFP 0x16e2
+#define BROADCOM_DEV_ID_57416_SFP 0x16e3
+#define BROADCOM_DEV_ID_57317_SFP 0x16e4
 #define BROADCOM_DEV_ID_57404_MF 0x16e7
 #define BROADCOM_DEV_ID_57406_MF 0x16e8
 #define BROADCOM_DEV_ID_57407_SFP 0x16e9
 #define BROADCOM_DEV_ID_57407_MF 0x16ea
+#define BROADCOM_DEV_ID_57414_MF 0x16ec
+#define BROADCOM_DEV_ID_57416_MF 0x16ee
 
 static struct rte_pci_id bnxt_pci_id_map[] = {
 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) },
@@ -95,6 +110,21 @@
 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_MF) },
 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5741X_VF) },
 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5731X_VF) },
+	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57314) },
+	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_MF) },
+	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57311) },
+	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57312) },
+	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412) },
+	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414) },
+	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_RJ45) },
+	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_RJ45) },
+	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412_MF) },
+	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_RJ45) },
+	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_SFP) },
+	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_SFP) },
+	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_SFP) },
+	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) },
+	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) },
 	{ .vendor_id = 0, /* sentinel */ },
 };
 
@@ -285,7 +315,9 @@
 {
 	int rc;
 
-	bnxt_init_ring_grps(bp);
+	rc = bnxt_init_ring_grps(bp);
+	if (rc)
+		return rc;
 	bnxt_init_vnics(bp);
 	bnxt_init_filters(bp);
 
diff -Nru dpdk-16.11.4/drivers/net/bnxt/bnxt.h dpdk-16.11.6/drivers/net/bnxt/bnxt.h
--- dpdk-16.11.4/drivers/net/bnxt/bnxt.h	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/bnxt/bnxt.h	2018-04-19 15:01:06.000000000 +0100
@@ -110,6 +110,7 @@
 	uint16_t		link_speed;
 	uint16_t		support_speeds;
 	uint16_t		auto_link_speed;
+	uint16_t		force_link_speed;
 	uint16_t		auto_link_speed_mask;
 	uint32_t		preemphasis;
 	uint8_t			phy_type;
diff -Nru dpdk-16.11.4/drivers/net/bnxt/bnxt_hwrm.c dpdk-16.11.6/drivers/net/bnxt/bnxt_hwrm.c
--- dpdk-16.11.4/drivers/net/bnxt/bnxt_hwrm.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/bnxt/bnxt_hwrm.c	2018-04-19 15:01:06.000000000 +0100
@@ -174,9 +174,9 @@
 	 * by ethtool.
 	 */
 	if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
-		mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
+		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
 	if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
-		mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
+		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
 	req.mask = rte_cpu_to_le_32(mask);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
@@ -504,7 +504,8 @@
 				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
 		}
 		/* AutoNeg - Advertise speeds specified. */
-		if (conf->auto_link_speed_mask) {
+		if (conf->auto_link_speed_mask &&
+		    !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
 			req.auto_mode =
 				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
 			req.auto_link_speed_mask =
@@ -566,6 +567,7 @@
 	link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
 	link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
 	link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
+	link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
 	link_info->phy_ver[0] = resp->phy_maj;
 	link_info->phy_ver[1] = resp->phy_min;
 	link_info->phy_ver[2] = resp->phy_bld;
@@ -604,7 +606,7 @@
 int bnxt_hwrm_ring_alloc(struct bnxt *bp,
 			 struct bnxt_ring *ring,
 			 uint32_t ring_type, uint32_t map_index,
-			 uint32_t stats_ctx_id)
+			 uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
 {
 	int rc = 0;
 	struct hwrm_ring_alloc_input req = {.req_type = 0 };
@@ -625,11 +627,12 @@
 		/* FALLTHROUGH */
 	case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
 		req.ring_type = ring_type;
-		req.cmpl_ring_id =
-		    rte_cpu_to_le_16(bp->grp_info[map_index].cp_fw_ring_id);
+		req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
 		req.length = rte_cpu_to_le_32(ring->ring_size);
 		req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
-		req.enables = rte_cpu_to_le_32(rte_le_to_cpu_32(req.enables) |
+		if (stats_ctx_id != INVALID_STATS_CTX_ID)
+			req.enables =
+			rte_cpu_to_le_32(rte_le_to_cpu_32(req.enables) |
 			HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
 		break;
 	case HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL:
@@ -796,7 +799,9 @@
 	HWRM_CHECK_RESULT;
 
 	cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
-	bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
+	//Tx rings don't need grp_info entry. It is a Rx only attribute.
+	if (idx)
+		bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
 
 	return rc;
 }
@@ -818,7 +823,9 @@
 	HWRM_CHECK_RESULT;
 
 	cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
-	bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
+	//Tx rings don't have a grp_info entry. It is a Rx only attribute.
+	if (idx)
+		bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
 
 	return rc;
 }
@@ -1025,10 +1032,13 @@
 	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
 		unsigned int idx = i + 1;
 
-		if (i >= bp->rx_cp_nr_rings)
+		if (i >= bp->rx_cp_nr_rings) {
 			cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
-		else
+			//Tx rings don't have a grp_info entry.
+			idx = 0;
+		} else {
 			cpr = bp->rx_queues[i]->cp_ring;
+		}
 		if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
 			rc = bnxt_hwrm_stat_ctx_free(bp, cpr, idx);
 			if (rc)
@@ -1052,6 +1062,8 @@
 		if (i >= bp->rx_cp_nr_rings) {
 			txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
 			cpr = txq->cp_ring;
+			//Tx rings don't need grp_info entry.
+			idx = 0;
 		} else {
 			rxq = bp->rx_queues[i];
 			cpr = rxq->cp_ring;
@@ -1089,14 +1101,13 @@
 }
 
 static void bnxt_free_cp_ring(struct bnxt *bp,
-			      struct bnxt_cp_ring_info *cpr, unsigned int idx)
+			      struct bnxt_cp_ring_info *cpr)
 {
 	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
 
 	bnxt_hwrm_ring_free(bp, cp_ring,
 			HWRM_RING_FREE_INPUT_RING_TYPE_CMPL);
 	cp_ring->fw_ring_id = INVALID_HW_RING_ID;
-	bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
 	memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
 			sizeof(*cpr->cp_desc_ring));
 	cpr->cp_raw_cons = 0;
@@ -1112,7 +1123,6 @@
 		struct bnxt_tx_ring_info *txr = txq->tx_ring;
 		struct bnxt_ring *ring = txr->tx_ring_struct;
 		struct bnxt_cp_ring_info *cpr = txq->cp_ring;
-		unsigned int idx = bp->rx_cp_nr_rings + i + 1;
 
 		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
 			bnxt_hwrm_ring_free(bp, ring,
@@ -1128,7 +1138,7 @@
 			txr->tx_cons = 0;
 		}
 		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
-			bnxt_free_cp_ring(bp, cpr, idx);
+			bnxt_free_cp_ring(bp, cpr);
 	}
 
 	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
@@ -1152,7 +1162,8 @@
 			rxr->rx_prod = 0;
 		}
 		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
-			bnxt_free_cp_ring(bp, cpr, idx);
+			bnxt_free_cp_ring(bp, cpr);
+		bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
 	}
 
 	/* Default completion ring */
@@ -1160,7 +1171,8 @@
 		struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
 
 		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
-			bnxt_free_cp_ring(bp, cpr, 0);
+			bnxt_free_cp_ring(bp, cpr);
+		bp->grp_info[0].cp_fw_ring_id = INVALID_HW_RING_ID;
 	}
 
 	return rc;
@@ -1511,7 +1523,9 @@
 	autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
 	speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
 	link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
-	if (autoneg == 1) {
+	/* Autoneg can be done only when the FW allows */
+	if (autoneg == 1 && !(bp->link_info.auto_link_speed ||
+				bp->link_info.force_link_speed)) {
 		link_req.phy_flags |=
 				HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
 		link_req.auto_link_speed_mask =
@@ -1529,7 +1543,13 @@
 		}
 
 		link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
-		link_req.link_speed = speed;
+		/* If user wants a particular speed try that first. */
+		if (speed)
+			link_req.link_speed = speed;
+		else if (bp->link_info.force_link_speed)
+			link_req.link_speed = bp->link_info.force_link_speed;
+		else
+			link_req.link_speed = bp->link_info.auto_link_speed;
 	}
 	link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
 	link_req.auto_pause = bp->link_info.auto_pause;
diff -Nru dpdk-16.11.4/drivers/net/bnxt/bnxt_hwrm.h dpdk-16.11.6/drivers/net/bnxt/bnxt_hwrm.h
--- dpdk-16.11.4/drivers/net/bnxt/bnxt_hwrm.h	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/bnxt/bnxt_hwrm.h	2018-04-19 15:01:06.000000000 +0100
@@ -65,7 +65,7 @@
 int bnxt_hwrm_ring_alloc(struct bnxt *bp,
 			 struct bnxt_ring *ring,
 			 uint32_t ring_type, uint32_t map_index,
-			 uint32_t stats_ctx_id);
+			 uint32_t stats_ctx_id, uint32_t cmpl_ring_id);
 int bnxt_hwrm_ring_free(struct bnxt *bp,
 			struct bnxt_ring *ring, uint32_t ring_type);
 int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx);
@@ -101,5 +101,7 @@
 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link);
 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up);
 int bnxt_hwrm_func_qcfg(struct bnxt *bp);
+#define HWRM_RING_ALLOC_INPUT_EN_STAT_CTX_ID_VALID \
+	HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID
 
 #endif
diff -Nru dpdk-16.11.4/drivers/net/bnxt/bnxt_ring.c dpdk-16.11.6/drivers/net/bnxt/bnxt_ring.c
--- dpdk-16.11.4/drivers/net/bnxt/bnxt_ring.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/bnxt/bnxt_ring.c	2018-04-19 15:01:06.000000000 +0100
@@ -61,13 +61,19 @@
  * Ring groups
  */
 
-void bnxt_init_ring_grps(struct bnxt *bp)
+int bnxt_init_ring_grps(struct bnxt *bp)
 {
 	unsigned int i;
 
+	//One slot is still consumed by Default ring.
+	if (bp->max_ring_grps < 1 + bp->rx_cp_nr_rings)
+		return -ENOMEM;
+
 	for (i = 0; i < bp->max_ring_grps; i++)
 		memset(&bp->grp_info[i], (uint8_t)HWRM_NA_SIGNATURE,
 		       sizeof(struct bnxt_ring_grp_info));
+
+	return 0;
 }
 
 /*
@@ -219,7 +225,8 @@
 
 		rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
 					  HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL,
-					  0, HWRM_NA_SIGNATURE);
+					  0, HWRM_NA_SIGNATURE,
+					  HWRM_NA_SIGNATURE);
 		if (rc)
 			goto err_out;
 		cpr->cp_doorbell =
@@ -239,7 +246,8 @@
 		/* Rx cmpl */
 		rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
 					HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL,
-					idx, HWRM_NA_SIGNATURE);
+					idx, HWRM_NA_SIGNATURE,
+					HWRM_NA_SIGNATURE);
 		if (rc)
 			goto err_out;
 		cpr->cp_doorbell =
@@ -251,7 +259,8 @@
 		/* Rx ring */
 		rc = bnxt_hwrm_ring_alloc(bp, ring,
 					HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
-					idx, cpr->hw_stats_ctx_id);
+					idx, cpr->hw_stats_ctx_id,
+					cp_ring->fw_ring_id);
 		if (rc)
 			goto err_out;
 		rxr->rx_prod = 0;
@@ -279,20 +288,21 @@
 		/* Tx cmpl */
 		rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
 					HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL,
-					idx, HWRM_NA_SIGNATURE);
+					idx, HWRM_NA_SIGNATURE,
+					HWRM_NA_SIGNATURE);
 		if (rc)
 			goto err_out;
 
 		cpr->cp_doorbell =
 		    (char *)bp->eth_dev->pci_dev->mem_resource[2].addr +
 		    idx * 0x80;
-		bp->grp_info[idx].cp_fw_ring_id = cp_ring->fw_ring_id;
 		B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
 
 		/* Tx ring */
 		rc = bnxt_hwrm_ring_alloc(bp, ring,
 					HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
-					idx, cpr->hw_stats_ctx_id);
+					idx, cpr->hw_stats_ctx_id,
+					cp_ring->fw_ring_id);
 		if (rc)
 			goto err_out;
 
diff -Nru dpdk-16.11.4/drivers/net/bnxt/bnxt_ring.h dpdk-16.11.6/drivers/net/bnxt/bnxt_ring.h
--- dpdk-16.11.4/drivers/net/bnxt/bnxt_ring.h	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/bnxt/bnxt_ring.h	2018-04-19 15:01:06.000000000 +0100
@@ -65,6 +65,7 @@
 #define MAX_CP_DESC_CNT (16 * 1024)
 
 #define INVALID_HW_RING_ID      ((uint16_t)-1)
+#define INVALID_STATS_CTX_ID	((uint16_t)-1)
 
 struct bnxt_ring {
 	void			*bd;
@@ -92,7 +93,7 @@
 struct bnxt_rx_ring_info;
 struct bnxt_cp_ring_info;
 void bnxt_free_ring(struct bnxt_ring *ring);
-void bnxt_init_ring_grps(struct bnxt *bp);
+int bnxt_init_ring_grps(struct bnxt *bp);
 int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
 			    struct bnxt_tx_ring_info *tx_ring_info,
 			    struct bnxt_rx_ring_info *rx_ring_info,
diff -Nru dpdk-16.11.4/drivers/net/bnxt/bnxt_rxr.c dpdk-16.11.6/drivers/net/bnxt/bnxt_rxr.c
--- dpdk-16.11.4/drivers/net/bnxt/bnxt_rxr.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/bnxt/bnxt_rxr.c	2018-04-19 15:01:06.000000000 +0100
@@ -72,7 +72,7 @@
 
 	rx_buf->mbuf = data;
 
-	rxbd->addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR(rx_buf->mbuf));
+	rxbd->addr = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(data));
 
 	return 0;
 }
@@ -126,6 +126,7 @@
 	mbuf = rx_buf->mbuf;
 	rte_prefetch0(mbuf);
 
+	mbuf->data_off = RTE_PKTMBUF_HEADROOM;
 	mbuf->nb_segs = 1;
 	mbuf->next = NULL;
 	mbuf->pkt_len = rxcmp->len;
@@ -152,12 +153,12 @@
 	if (likely(RX_CMP_IP_CS_OK(rxcmp1)))
 		mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
 	else
-		mbuf->ol_flags |= PKT_RX_IP_CKSUM_NONE;
+		mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
 
 	if (likely(RX_CMP_L4_CS_OK(rxcmp1)))
 		mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
 	else
-		mbuf->ol_flags |= PKT_RX_L4_CKSUM_NONE;
+		mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
 
 	if (rxcmp1->errors_v2 & RX_CMP_L2_ERRORS) {
 		/* Re-install the mbuf back to the rx ring */
diff -Nru dpdk-16.11.4/drivers/net/bnxt/bnxt_txr.c dpdk-16.11.6/drivers/net/bnxt/bnxt_txr.c
--- dpdk-16.11.4/drivers/net/bnxt/bnxt_txr.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/bnxt/bnxt_txr.c	2018-04-19 15:01:06.000000000 +0100
@@ -101,7 +101,7 @@
 	if (ring == NULL)
 		return -ENOMEM;
 	txr->tx_ring_struct = ring;
-	ring->ring_size = rte_align32pow2(txq->nb_tx_desc + 1);
+	ring->ring_size = rte_align32pow2(txq->nb_tx_desc);
 	ring->ring_mask = ring->ring_size - 1;
 	ring->bd = (void *)txr->tx_desc_ring;
 	ring->bd_dma = txr->tx_desc_mapping;
@@ -216,23 +216,28 @@
 					tx_pkt->l4_len;
 			txbd1->mss = tx_pkt->tso_segsz;
 
-		} else if (tx_pkt->ol_flags & PKT_TX_OIP_IIP_TCP_UDP_CKSUM) {
+		} else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_TCP_UDP_CKSUM) ==
+			   PKT_TX_OIP_IIP_TCP_UDP_CKSUM) {
 			/* Outer IP, Inner IP, Inner TCP/UDP CSO */
 			txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM;
 			txbd1->mss = 0;
-		} else if (tx_pkt->ol_flags & PKT_TX_IIP_TCP_UDP_CKSUM) {
+		} else if ((tx_pkt->ol_flags & PKT_TX_IIP_TCP_UDP_CKSUM) ==
+			   PKT_TX_IIP_TCP_UDP_CKSUM) {
 			/* (Inner) IP, (Inner) TCP/UDP CSO */
 			txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM;
 			txbd1->mss = 0;
-		} else if (tx_pkt->ol_flags & PKT_TX_OIP_TCP_UDP_CKSUM) {
+		} else if ((tx_pkt->ol_flags & PKT_TX_OIP_TCP_UDP_CKSUM) ==
+			   PKT_TX_OIP_TCP_UDP_CKSUM) {
 			/* Outer IP, (Inner) TCP/UDP CSO */
 			txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM;
 			txbd1->mss = 0;
-		} else if (tx_pkt->ol_flags & PKT_TX_OIP_IIP_CKSUM) {
+		} else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_CKSUM) ==
+			   PKT_TX_OIP_IIP_CKSUM) {
 			/* Outer IP, Inner IP CSO */
 			txbd1->lflags |= TX_BD_FLG_TIP_IP_CHKSUM;
 			txbd1->mss = 0;
-		} else if (tx_pkt->ol_flags & PKT_TX_TCP_UDP_CKSUM) {
+		} else if ((tx_pkt->ol_flags & PKT_TX_TCP_UDP_CKSUM) ==
+			   PKT_TX_TCP_UDP_CKSUM) {
 			/* TCP/UDP CSO */
 			txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
 			txbd1->mss = 0;
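A note on the bnxt_txr.c hunk above: the PKT_TX_OIP_IIP_TCP_UDP_CKSUM family of masks are composites built from several offload bits, so a plain `flags & MASK` test already matches when any single bit overlaps, while comparing the masked value back against the mask requires all bits to be set. A generic sketch of the difference follows; the flag values are invented for illustration and are not the real PKT_TX_* definitions:

    #include <stdint.h>
    #include <stdio.h>

    /* Made-up flag values; the real PKT_TX_* masks live in rte_mbuf.h and
     * are combinations of individual offload bits. */
    #define F_IP_CKSUM   (1u << 0)
    #define F_TCP_CKSUM  (1u << 1)
    #define F_OUTER_IP   (1u << 2)
    #define F_COMPOSITE  (F_IP_CKSUM | F_TCP_CKSUM | F_OUTER_IP)

    int main(void)
    {
        uint32_t ol_flags = F_TCP_CKSUM;        /* only one bit requested */

        /* Loose test: true as soon as any bit of the mask overlaps. */
        printf("any-bit test:  %d\n", (ol_flags & F_COMPOSITE) != 0);

        /* Strict test: true only when every bit of the mask is present. */
        printf("all-bits test: %d\n", (ol_flags & F_COMPOSITE) == F_COMPOSITE);
        return 0;
    }

The patch switches the driver to the strict form so that, for example, a packet requesting only inner TCP checksum offload no longer falls into the outer-plus-inner branch.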
diff -Nru dpdk-16.11.4/drivers/net/bonding/rte_eth_bond_8023ad.c dpdk-16.11.6/drivers/net/bonding/rte_eth_bond_8023ad.c
--- dpdk-16.11.4/drivers/net/bonding/rte_eth_bond_8023ad.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/bonding/rte_eth_bond_8023ad.c	2018-04-19 15:01:06.000000000 +0100
@@ -1109,7 +1109,8 @@
 	uint8_t i;
 
 	for (i = 0; i < internals->active_slave_count; i++)
-		bond_mode_8023ad_activate_slave(bond_dev, i);
+		bond_mode_8023ad_activate_slave(bond_dev,
+				internals->active_slaves[i]);
 
 	return 0;
 }
diff -Nru dpdk-16.11.4/drivers/net/bonding/rte_eth_bond_api.c dpdk-16.11.6/drivers/net/bonding/rte_eth_bond_api.c
--- dpdk-16.11.4/drivers/net/bonding/rte_eth_bond_api.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/bonding/rte_eth_bond_api.c	2018-04-19 15:01:06.000000000 +0100
@@ -412,8 +412,13 @@
 	if (internals->slave_count < 1) {
 		/* if MAC is not user defined then use MAC of first slave add to
 		 * bonded device */
-		if (!internals->user_defined_mac)
-			mac_address_set(bonded_eth_dev, slave_eth_dev->data->mac_addrs);
+		if (!internals->user_defined_mac) {
+			if (mac_address_set(bonded_eth_dev,
+					    slave_eth_dev->data->mac_addrs)) {
+				RTE_BOND_LOG(ERR, "Failed to set MAC address");
+				return -1;
+			}
+		}
 
 		/* Inherit eth dev link properties from first slave */
 		link_properties_set(bonded_eth_dev,
@@ -565,7 +570,7 @@
 			&rte_eth_devices[bonded_port_id].data->port_id);
 
 	/* Restore original MAC address of slave device */
-	mac_address_set(&rte_eth_devices[slave_port_id],
+	rte_eth_dev_default_mac_addr_set(slave_port_id,
 			&(internals->slaves[slave_idx].persisted_mac_addr));
 
 	slave_eth_dev = &rte_eth_devices[slave_port_id];
diff -Nru dpdk-16.11.4/drivers/net/bonding/rte_eth_bond_pmd.c dpdk-16.11.6/drivers/net/bonding/rte_eth_bond_pmd.c
--- dpdk-16.11.4/drivers/net/bonding/rte_eth_bond_pmd.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/bonding/rte_eth_bond_pmd.c	2018-04-19 15:01:06.000000000 +0100
@@ -1217,7 +1217,8 @@
 	case BONDING_MODE_BALANCE:
 	case BONDING_MODE_BROADCAST:
 		for (i = 0; i < internals->slave_count; i++) {
-			if (mac_address_set(&rte_eth_devices[internals->slaves[i].port_id],
+			if (rte_eth_dev_default_mac_addr_set(
+					internals->slaves[i].port_id,
 					bonded_eth_dev->data->mac_addrs)) {
 				RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
 						internals->slaves[i].port_id);
@@ -1235,15 +1236,16 @@
 		for (i = 0; i < internals->slave_count; i++) {
 			if (internals->slaves[i].port_id ==
 					internals->current_primary_port) {
-				if (mac_address_set(&rte_eth_devices[internals->primary_port],
+				if (rte_eth_dev_default_mac_addr_set(
+						internals->primary_port,
 						bonded_eth_dev->data->mac_addrs)) {
 					RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
 							internals->current_primary_port);
 					return -1;
 				}
 			} else {
-				if (mac_address_set(
-						&rte_eth_devices[internals->slaves[i].port_id],
+				if (rte_eth_dev_default_mac_addr_set(
+						internals->slaves[i].port_id,
 						&internals->slaves[i].persisted_mac_addr)) {
 					RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
 							internals->slaves[i].port_id);
diff -Nru dpdk-16.11.4/drivers/net/e1000/em_ethdev.c dpdk-16.11.6/drivers/net/e1000/em_ethdev.c
--- dpdk-16.11.4/drivers/net/e1000/em_ethdev.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/e1000/em_ethdev.c	2018-04-19 15:01:06.000000000 +0100
@@ -1145,7 +1145,7 @@
 		link.link_speed = 0;
 		link.link_duplex = ETH_LINK_HALF_DUPLEX;
 		link.link_status = ETH_LINK_DOWN;
-		link.link_autoneg = ETH_LINK_SPEED_FIXED;
+		link.link_autoneg = ETH_LINK_FIXED;
 	}
 	rte_em_dev_atomic_write_link_status(dev, &link);
 
diff -Nru dpdk-16.11.4/drivers/net/e1000/igb_ethdev.c dpdk-16.11.6/drivers/net/e1000/igb_ethdev.c
--- dpdk-16.11.4/drivers/net/e1000/igb_ethdev.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/e1000/igb_ethdev.c	2018-04-19 15:01:06.000000000 +0100
@@ -1121,7 +1121,7 @@
 	enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
 	enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode;
 	uint16_t nb_rx_q = dev->data->nb_rx_queues;
-	uint16_t nb_tx_q = dev->data->nb_rx_queues;
+	uint16_t nb_tx_q = dev->data->nb_tx_queues;
 
 	if ((rx_mq_mode & ETH_MQ_RX_DCB_FLAG) ||
 	    tx_mq_mode == ETH_MQ_TX_DCB ||
@@ -2226,7 +2226,7 @@
 		link.link_speed = 0;
 		link.link_duplex = ETH_LINK_HALF_DUPLEX;
 		link.link_status = ETH_LINK_DOWN;
-		link.link_autoneg = ETH_LINK_SPEED_FIXED;
+		link.link_autoneg = ETH_LINK_FIXED;
 	}
 	rte_igb_dev_atomic_write_link_status(dev, &link);
 
@@ -2757,12 +2757,17 @@
 	struct e1000_mbx_info *mbx = &hw->mbx;
 	u32 in_msg = 0;
 
-	if (mbx->ops.read(hw, &in_msg, 1, 0))
-		return;
+	/* peek the message first */
+	in_msg = E1000_READ_REG(hw, E1000_VMBMEM(0));
 
 	/* PF reset VF event */
-	if (in_msg == E1000_PF_CONTROL_MSG)
-		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, NULL);
+	if (in_msg == E1000_PF_CONTROL_MSG) {
+		/* dummy mbx read to ack pf */
+		if (mbx->ops.read(hw, &in_msg, 1, 0))
+			return;
+		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
+					      NULL);
+	}
 }
 
 static int
@@ -3085,7 +3090,8 @@
 	}
 
 	/* check and configure queue intr-vector mapping */
-	if (dev->data->dev_conf.intr_conf.rxq != 0) {
+	if (rte_intr_cap_multiple(intr_handle) &&
+	    dev->data->dev_conf.intr_conf.rxq) {
 		intr_vector = dev->data->nb_rx_queues;
 		ret = rte_intr_efd_enable(intr_handle, intr_vector);
 		if (ret)
diff -Nru dpdk-16.11.4/drivers/net/ena/ena_ethdev.c dpdk-16.11.6/drivers/net/ena/ena_ethdev.c
--- dpdk-16.11.4/drivers/net/ena/ena_ethdev.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/ena/ena_ethdev.c	2018-04-19 15:01:06.000000000 +0100
@@ -248,16 +248,17 @@
 				       struct ena_com_rx_ctx *ena_rx_ctx)
 {
 	uint64_t ol_flags = 0;
+	uint32_t packet_type = 0;
 
 	if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP)
-		ol_flags |= PKT_TX_TCP_CKSUM;
+		packet_type |= RTE_PTYPE_L4_TCP;
 	else if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)
-		ol_flags |= PKT_TX_UDP_CKSUM;
+		packet_type |= RTE_PTYPE_L4_UDP;
 
 	if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4)
-		ol_flags |= PKT_TX_IPV4;
+		packet_type |= RTE_PTYPE_L3_IPV4;
 	else if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6)
-		ol_flags |= PKT_TX_IPV6;
+		packet_type |= RTE_PTYPE_L3_IPV6;
 
 	if (unlikely(ena_rx_ctx->l4_csum_err))
 		ol_flags |= PKT_RX_L4_CKSUM_BAD;
@@ -265,6 +266,7 @@
 		ol_flags |= PKT_RX_IP_CKSUM_BAD;
 
 	mbuf->ol_flags = ol_flags;
+	mbuf->packet_type = packet_type;
 }
 
 static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
diff -Nru dpdk-16.11.4/drivers/net/enic/enic_ethdev.c dpdk-16.11.6/drivers/net/enic/enic_ethdev.c
--- dpdk-16.11.4/drivers/net/enic/enic_ethdev.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/enic/enic_ethdev.c	2018-04-19 15:01:06.000000000 +0100
@@ -184,13 +184,7 @@
 	struct enic *enic = pmd_priv(eth_dev);
 
 	ENICPMD_FUNC_TRACE();
-	if (queue_idx >= ENIC_WQ_MAX) {
-		dev_err(enic,
-			"Max number of TX queues exceeded.  Max is %d\n",
-			ENIC_WQ_MAX);
-		return -EINVAL;
-	}
-
+	RTE_ASSERT(queue_idx < enic->conf_wq_count);
 	eth_dev->data->tx_queues[queue_idx] = (void *)&enic->wq[queue_idx];
 
 	ret = enic_alloc_wq(enic, queue_idx, socket_id, nb_desc);
@@ -302,16 +296,8 @@
 	struct enic *enic = pmd_priv(eth_dev);
 
 	ENICPMD_FUNC_TRACE();
-	/* With Rx scatter support, two RQs are now used on VIC per RQ used
-	 * by the application.
-	 */
-	if (queue_idx * 2 >= ENIC_RQ_MAX) {
-		dev_err(enic,
-			"Max number of RX queues exceeded.  Max is %d. This PMD uses 2 RQs on VIC per RQ used by DPDK.\n",
-			ENIC_RQ_MAX);
-		return -EINVAL;
-	}
 
+	RTE_ASSERT(enic_rte_rq_idx_to_sop_idx(queue_idx) < enic->conf_rq_count);
 	eth_dev->data->rx_queues[queue_idx] =
 		(void *)&enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
 
diff -Nru dpdk-16.11.4/drivers/net/enic/enic.h dpdk-16.11.6/drivers/net/enic/enic.h
--- dpdk-16.11.4/drivers/net/enic/enic.h	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/enic/enic.h	2018-04-19 15:01:06.000000000 +0100
@@ -53,14 +53,6 @@
 #define DRV_DESCRIPTION		"Cisco VIC Ethernet NIC Poll-mode Driver"
 #define DRV_COPYRIGHT		"Copyright 2008-2015 Cisco Systems, Inc"
 
-#define ENIC_WQ_MAX		8
-/* With Rx scatter support, we use two RQs on VIC per RQ used by app. Both
- * RQs use the same CQ.
- */
-#define ENIC_RQ_MAX		16
-#define ENIC_CQ_MAX		(ENIC_WQ_MAX + (ENIC_RQ_MAX / 2))
-#define ENIC_INTR_MAX		(ENIC_CQ_MAX + 2)
-
 #define VLAN_ETH_HLEN           18
 
 #define ENICPMD_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 1 : 0)
@@ -139,17 +131,17 @@
 	unsigned int flags;
 	unsigned int priv_flags;
 
-	/* work queue */
-	struct vnic_wq wq[ENIC_WQ_MAX];
-	unsigned int wq_count;
-
-	/* receive queue */
-	struct vnic_rq rq[ENIC_RQ_MAX];
-	unsigned int rq_count;
-
-	/* completion queue */
-	struct vnic_cq cq[ENIC_CQ_MAX];
-	unsigned int cq_count;
+	/* work queue (len = conf_wq_count) */
+	struct vnic_wq *wq;
+	unsigned int wq_count; /* equals eth_dev nb_tx_queues */
+
+	/* receive queue (len = conf_rq_count) */
+	struct vnic_rq *rq;
+	unsigned int rq_count; /* equals eth_dev nb_rx_queues */
+
+	/* completion queue (len = conf_cq_count) */
+	struct vnic_cq *cq;
+	unsigned int cq_count; /* equals rq_count + wq_count */
 
 	/* interrupt resource */
 	struct vnic_intr intr;
diff -Nru dpdk-16.11.4/drivers/net/enic/enic_main.c dpdk-16.11.6/drivers/net/enic/enic_main.c
--- dpdk-16.11.4/drivers/net/enic/enic_main.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/enic/enic_main.c	2018-04-19 15:01:06.000000000 +0100
@@ -1080,6 +1080,9 @@
 	vnic_dev_notify_unset(enic->vdev);
 
 	rte_free(eth_dev->data->mac_addrs);
+	rte_free(enic->cq);
+	rte_free(enic->rq);
+	rte_free(enic->wq);
 }
 
 
@@ -1087,27 +1090,28 @@
 {
 	struct rte_eth_dev *eth_dev = enic->rte_dev;
 	int rc = 0;
+	unsigned int required_rq, required_wq, required_cq;
 
-	/* With Rx scatter support, two RQs are now used per RQ used by
-	 * the application.
-	 */
-	if (enic->conf_rq_count < eth_dev->data->nb_rx_queues) {
+	/* Always use two vNIC RQs per eth_dev RQ, regardless of Rx scatter. */
+	required_rq = eth_dev->data->nb_rx_queues * 2;
+	required_wq = eth_dev->data->nb_tx_queues;
+	required_cq = eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues;
+
+	if (enic->conf_rq_count < required_rq) {
 		dev_err(dev, "Not enough Receive queues. Requested:%u which uses %d RQs on VIC, Configured:%u\n",
 			eth_dev->data->nb_rx_queues,
-			eth_dev->data->nb_rx_queues * 2, enic->conf_rq_count);
+			required_rq, enic->conf_rq_count);
 		rc = -EINVAL;
 	}
-	if (enic->conf_wq_count < eth_dev->data->nb_tx_queues) {
+	if (enic->conf_wq_count < required_wq) {
 		dev_err(dev, "Not enough Transmit queues. Requested:%u, Configured:%u\n",
 			eth_dev->data->nb_tx_queues, enic->conf_wq_count);
 		rc = -EINVAL;
 	}
 
-	if (enic->conf_cq_count < (eth_dev->data->nb_rx_queues +
-				   eth_dev->data->nb_tx_queues)) {
+	if (enic->conf_cq_count < required_cq) {
 		dev_err(dev, "Not enough Completion queues. Required:%u, Configured:%u\n",
-			(eth_dev->data->nb_rx_queues +
-			 eth_dev->data->nb_tx_queues), enic->conf_cq_count);
+			required_cq, enic->conf_cq_count);
 		rc = -EINVAL;
 	}
 
@@ -1309,6 +1313,25 @@
 		dev_err(enic, "See the ENIC PMD guide for more information.\n");
 		return -EINVAL;
 	}
+	/* Queue counts may be zeros. rte_zmalloc returns NULL in that case. */
+	enic->cq = rte_zmalloc("enic_vnic_cq", sizeof(struct vnic_cq) *
+			       enic->conf_cq_count, 8);
+	enic->rq = rte_zmalloc("enic_vnic_rq", sizeof(struct vnic_rq) *
+			       enic->conf_rq_count, 8);
+	enic->wq = rte_zmalloc("enic_vnic_wq", sizeof(struct vnic_wq) *
+			       enic->conf_wq_count, 8);
+	if (enic->conf_cq_count > 0 && enic->cq == NULL) {
+		dev_err(enic, "failed to allocate vnic_cq, aborting.\n");
+		return -1;
+	}
+	if (enic->conf_rq_count > 0 && enic->rq == NULL) {
+		dev_err(enic, "failed to allocate vnic_rq, aborting.\n");
+		return -1;
+	}
+	if (enic->conf_wq_count > 0 && enic->wq == NULL) {
+		dev_err(enic, "failed to allocate vnic_wq, aborting.\n");
+		return -1;
+	}
 
 	/* Get the supported filters */
 	enic_fdir_info(enic);
diff -Nru dpdk-16.11.4/drivers/net/fm10k/fm10k_ethdev.c dpdk-16.11.6/drivers/net/fm10k/fm10k_ethdev.c
--- dpdk-16.11.4/drivers/net/fm10k/fm10k_ethdev.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/fm10k/fm10k_ethdev.c	2018-04-19 15:01:06.000000000 +0100
@@ -53,7 +53,7 @@
 /* Wait interval to get switch status */
 #define WAIT_SWITCH_MSG_US    100000
 /* A period of quiescence for switch */
-#define FM10K_SWITCH_QUIESCE_US 10000
+#define FM10K_SWITCH_QUIESCE_US 100000
 /* Number of chars per uint32 type */
 #define CHARS_PER_UINT32 (sizeof(uint32_t))
 #define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)
@@ -1239,7 +1239,7 @@
 		MAX_LPORT_NUM, false);
 	fm10k_mbx_unlock(hw);
 
-	/* allow 10ms for device to quiesce */
+	/* allow 100ms for device to quiesce */
 	rte_delay_us(FM10K_SWITCH_QUIESCE_US);
 
 	/* Stop mailbox service first */
diff -Nru dpdk-16.11.4/drivers/net/i40e/base/i40e_adminq.c dpdk-16.11.6/drivers/net/i40e/base/i40e_adminq.c
--- dpdk-16.11.4/drivers/net/i40e/base/i40e_adminq.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/i40e/base/i40e_adminq.c	2018-04-19 15:01:06.000000000 +0100
@@ -682,6 +682,12 @@
 			   &oem_lo);
 	hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;
 
+	/* Newer versions of firmware require lock when reading the NVM */
+	if ((hw->aq.api_maj_ver > 1) ||
+	    ((hw->aq.api_maj_ver == 1) &&
+	     (hw->aq.api_min_ver >= 5)))
+		hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
+
 	if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
 		ret_code = I40E_ERR_FIRMWARE_API_VERSION;
 		goto init_adminq_free_arq;
@@ -1051,22 +1057,19 @@
 	}
 
 	/* set next_to_use to head */
-#ifdef PF_DRIVER
 #ifdef INTEGRATED_VF
 	if (!i40e_is_vf(hw))
-		ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
+		ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
+	else
+		ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK;
 #else
-	ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
-#endif /* INTEGRATED_VF */
+#ifdef PF_DRIVER
+	ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
 #endif /* PF_DRIVER */
 #ifdef VF_DRIVER
-#ifdef INTEGRATED_VF
-	if (i40e_is_vf(hw))
-		ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
-#else
-	ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
-#endif /* INTEGRATED_VF */
+	ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK;
 #endif /* VF_DRIVER */
+#endif /* INTEGRATED_VF */
 	if (ntu == ntc) {
 		/* nothing to do - shouldn't need to update ring's values */
 		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
diff -Nru dpdk-16.11.4/drivers/net/i40e/base/i40e_common.c dpdk-16.11.6/drivers/net/i40e/base/i40e_common.c
--- dpdk-16.11.4/drivers/net/i40e/base/i40e_common.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/i40e/base/i40e_common.c	2018-04-19 15:01:06.000000000 +0100
@@ -1046,7 +1046,8 @@
 
 #ifdef X722_SUPPORT
 	if (hw->mac.type == I40E_MAC_X722)
-		hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE;
+		hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
+			     I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
 
 #endif
 	status = i40e_init_nvm(hw);
@@ -1578,6 +1579,7 @@
 		case I40E_COMBINED_ACTIVITY:
 		case I40E_FILTER_ACTIVITY:
 		case I40E_MAC_ACTIVITY:
+		case I40E_LINK_ACTIVITY:
 			continue;
 		default:
 			break;
@@ -1626,6 +1628,7 @@
 		case I40E_COMBINED_ACTIVITY:
 		case I40E_FILTER_ACTIVITY:
 		case I40E_MAC_ACTIVITY:
+		case I40E_LINK_ACTIVITY:
 			continue;
 		default:
 			break;
@@ -1636,9 +1639,6 @@
 		gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
 			     I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);
 
-		if (mode == I40E_LINK_ACTIVITY)
-			blink = false;
-
 		if (blink)
 			gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
 		else
diff -Nru dpdk-16.11.4/drivers/net/i40e/base/i40e_nvm.c dpdk-16.11.6/drivers/net/i40e/base/i40e_nvm.c
--- dpdk-16.11.4/drivers/net/i40e/base/i40e_nvm.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/i40e/base/i40e_nvm.c	2018-04-19 15:01:06.000000000 +0100
@@ -221,7 +221,8 @@
 
 #ifdef X722_SUPPORT
 	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
-		ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+		if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
+			ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
 		if (!ret_code) {
 			ret_code = i40e_read_nvm_word_aq(hw, offset, data);
 			i40e_release_nvm(hw);
diff -Nru dpdk-16.11.4/drivers/net/i40e/base/i40e_type.h dpdk-16.11.6/drivers/net/i40e/base/i40e_type.h
--- dpdk-16.11.4/drivers/net/i40e/base/i40e_type.h	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/i40e/base/i40e_type.h	2018-04-19 15:01:06.000000000 +0100
@@ -685,6 +685,7 @@
 
 #endif
 #define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0)
+#define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3)
 	u64 flags;
 
 	/* debug mask */
diff -Nru dpdk-16.11.4/drivers/net/i40e/i40e_ethdev.c dpdk-16.11.6/drivers/net/i40e/i40e_ethdev.c
--- dpdk-16.11.4/drivers/net/i40e/i40e_ethdev.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/i40e/i40e_ethdev.c	2018-04-19 15:01:06.000000000 +0100
@@ -716,6 +716,15 @@
 	return 0;
 }
 
+static inline void
+i40e_write_global_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
+{
+	i40e_write_rx_ctl(hw, reg_addr, reg_val);
+	PMD_DRV_LOG(DEBUG, "Global register 0x%08x is modified "
+		    "with value 0x%08x",
+		    reg_addr, reg_val);
+}
+
 RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd.pci_drv);
 RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map);
 
@@ -735,9 +744,10 @@
 	 * configuration API is added to avoid configuration conflicts
 	 * between ports of the same device.
 	 */
-	I40E_WRITE_REG(hw, I40E_GLQF_ORT(33), 0x000000E0);
-	I40E_WRITE_REG(hw, I40E_GLQF_ORT(34), 0x000000E3);
-	I40E_WRITE_REG(hw, I40E_GLQF_ORT(35), 0x000000E6);
+	I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33), 0x000000E0);
+	I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(34), 0x000000E3);
+	I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(35), 0x000000E6);
+	i40e_global_cfg_warning(I40E_WARNING_ENA_FLX_PLD);
 
 	/*
 	 * Initialize registers for parsing packet type of QinQ
@@ -745,8 +755,26 @@
 	 * configuration API is added to avoid configuration conflicts
 	 * between ports of the same device.
 	 */
-	I40E_WRITE_REG(hw, I40E_GLQF_ORT(40), 0x00000029);
-	I40E_WRITE_REG(hw, I40E_GLQF_PIT(9), 0x00009420);
+	I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(40), 0x00000029);
+	I40E_WRITE_GLB_REG(hw, I40E_GLQF_PIT(9), 0x00009420);
+	i40e_global_cfg_warning(I40E_WARNING_QINQ_PARSER);
+}
+
+static inline void i40e_config_automask(struct i40e_pf *pf)
+{
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+	uint32_t val;
+
+	/* INTENA flag is not auto-cleared for interrupt */
+	val = I40E_READ_REG(hw, I40E_GLINT_CTL);
+	val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
+		I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
+
+	/* If support multi-driver, PF will use INT0. */
+	if (!pf->support_multi_driver)
+		val |= I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK;
+
+	I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
 }
 
 #define I40E_FLOW_CONTROL_ETHERTYPE  0x8808
@@ -933,6 +961,71 @@
 #define I40E_L2_TAGS_S_TAG_SHIFT 1
 #define I40E_L2_TAGS_S_TAG_MASK I40E_MASK(0x1, I40E_L2_TAGS_S_TAG_SHIFT)
 
+#define ETH_I40E_SUPPORT_MULTI_DRIVER	"support-multi-driver"
+RTE_PMD_REGISTER_PARAM_STRING(net_i40e,
+			      ETH_I40E_SUPPORT_MULTI_DRIVER "=0|1");
+
+static int
+i40e_parse_multi_drv_handler(__rte_unused const char *key,
+			      const char *value,
+			      void *opaque)
+{
+	struct i40e_pf *pf;
+	unsigned long support_multi_driver;
+	char *end;
+
+	pf = (struct i40e_pf *)opaque;
+
+	errno = 0;
+	support_multi_driver = strtoul(value, &end, 10);
+	if (errno != 0 || end == value || *end != 0) {
+		PMD_DRV_LOG(WARNING, "Wrong global configuration");
+		return -(EINVAL);
+	}
+
+	if (support_multi_driver == 1 || support_multi_driver == 0)
+		pf->support_multi_driver = (bool)support_multi_driver;
+	else
+		PMD_DRV_LOG(WARNING, "%s must be 1 or 0,",
+			    "enable global configuration by default."
+			    ETH_I40E_SUPPORT_MULTI_DRIVER);
+	return 0;
+}
+
+static int
+i40e_support_multi_driver(struct rte_eth_dev *dev)
+{
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct rte_pci_device *pci_dev = dev->pci_dev;
+	static const char *valid_keys[] = {
+		ETH_I40E_SUPPORT_MULTI_DRIVER, NULL};
+	struct rte_kvargs *kvlist;
+
+	/* Enable global configuration by default */
+	pf->support_multi_driver = false;
+
+	if (!pci_dev->device.devargs)
+		return 0;
+
+	kvlist = rte_kvargs_parse(pci_dev->device.devargs->args, valid_keys);
+	if (!kvlist)
+		return -EINVAL;
+
+	if (rte_kvargs_count(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER) > 1)
+		PMD_DRV_LOG(WARNING, "More than one argument \"%s\" and only "
+			    "the first invalid or last valid one is used !",
+			    ETH_I40E_SUPPORT_MULTI_DRIVER);
+
+	if (rte_kvargs_process(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER,
+			       i40e_parse_multi_drv_handler, pf) < 0) {
+		rte_kvargs_free(kvlist);
+		return -EINVAL;
+	}
+
+	rte_kvargs_free(kvlist);
+	return 0;
+}
+
 static int
 eth_i40e_dev_init(struct rte_eth_dev *dev)
 {
@@ -982,6 +1075,9 @@
 	hw->bus.func = pci_dev->addr.function;
 	hw->adapter_stopped = 0;
 
+	/* Check if need to support multi-driver */
+	i40e_support_multi_driver(dev);
+
 	/* Make sure all is clean before doing PF reset */
 	i40e_clear_hw(hw);
 
@@ -1002,13 +1098,16 @@
 		return ret;
 	}
 
+	i40e_config_automask(pf);
+
 	/*
 	 * To work around the NVM issue, initialize registers
 	 * for flexible payload and packet type of QinQ by
 	 * software. It should be removed once issues are fixed
 	 * in NVM.
 	 */
-	i40e_GLQF_reg_init(hw);
+	if (!pf->support_multi_driver)
+		i40e_GLQF_reg_init(hw);
 
 	/* Initialize the input set for filters (hash and fd) to default value */
 	i40e_filter_input_set_init(pf);
@@ -1104,11 +1203,14 @@
 	i40e_set_fc(hw, &aq_fail, TRUE);
 
 	/* Set the global registers with default ether type value */
-	ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER, ETHER_TYPE_VLAN);
-	if (ret != I40E_SUCCESS) {
-		PMD_INIT_LOG(ERR, "Failed to set the default outer "
-			     "VLAN ether type");
-		goto err_setup_pf_switch;
+	if (!pf->support_multi_driver) {
+		ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
+					 ETHER_TYPE_VLAN);
+		if (ret != I40E_SUCCESS) {
+			PMD_INIT_LOG(ERR, "Failed to set the default outer "
+				     "VLAN ether type");
+			goto err_setup_pf_switch;
+		}
 	}
 
 	/* PF setup, which includes VSI setup */
@@ -1384,6 +1486,7 @@
 	int i;
 	uint32_t val;
 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
 
 	/* Bind all RX queues to allocated MSIX interrupt */
 	for (i = 0; i < nb_queue; i++) {
@@ -1402,7 +1505,8 @@
 	/* Write first RX queue to Link list register as the head element */
 	if (vsi->type != I40E_VSI_SRIOV) {
 		uint16_t interval =
-			i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
+			i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL,
+					       pf->support_multi_driver);
 
 		if (msix_vect == I40E_MISC_VEC_ID) {
 			I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
@@ -1460,7 +1564,6 @@
 	uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
 	uint16_t queue_idx = 0;
 	int record = 0;
-	uint32_t val;
 	int i;
 
 	for (i = 0; i < vsi->nb_qps; i++) {
@@ -1468,13 +1571,6 @@
 		I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
 	}
 
-	/* INTENA flag is not auto-cleared for interrupt */
-	val = I40E_READ_REG(hw, I40E_GLINT_CTL);
-	val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
-		I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK |
-		I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
-	I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
-
 	/* VF bind interrupt */
 	if (vsi->type == I40E_VSI_SRIOV) {
 		__vsi_queues_bind_intr(vsi, msix_vect,
@@ -1527,27 +1623,22 @@
 	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
 	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
-	uint16_t interval = i40e_calc_itr_interval(\
-		RTE_LIBRTE_I40E_ITR_INTERVAL);
+	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
 	uint16_t msix_intr, i;
 
-	if (rte_intr_allow_others(intr_handle))
+	if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
 		for (i = 0; i < vsi->nb_msix; i++) {
 			msix_intr = vsi->msix_intr + i;
 			I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
-				I40E_PFINT_DYN_CTLN_INTENA_MASK |
-				I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
-				(0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
-				(interval <<
-				 I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
+				       I40E_PFINT_DYN_CTLN_INTENA_MASK |
+				       I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+				       I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
 		}
 	else
 		I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
 			       I40E_PFINT_DYN_CTL0_INTENA_MASK |
 			       I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
-			       (0 << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT) |
-			       (interval <<
-				I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT));
+			       I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
 
 	I40E_WRITE_FLUSH(hw);
 }
@@ -1558,16 +1649,18 @@
 	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
 	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
 	uint16_t msix_intr, i;
 
-	if (rte_intr_allow_others(intr_handle))
+	if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
 		for (i = 0; i < vsi->nb_msix; i++) {
 			msix_intr = vsi->msix_intr + i;
 			I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
-				       0);
+				       I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
 		}
 	else
-		I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
+		I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
+			       I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
 
 	I40E_WRITE_FLUSH(hw);
 }
@@ -2743,11 +2836,17 @@
 		   uint16_t tpid)
 {
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	uint64_t reg_r = 0, reg_w = 0;
 	uint16_t reg_id = 0;
 	int ret = 0;
 	int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
 
+	if (pf->support_multi_driver) {
+		PMD_DRV_LOG(ERR, "Setting TPID is not supported.");
+		return -ENOTSUP;
+	}
+
 	switch (vlan_type) {
 	case ETH_VLAN_TYPE_OUTER:
 		if (qinq)
@@ -2797,8 +2896,11 @@
 			    "I40E_GL_SWT_L2TAGCTRL[%d]", reg_id);
 		return ret;
 	}
-	PMD_DRV_LOG(DEBUG, "Debug write 0x%08"PRIx64" to "
-		    "I40E_GL_SWT_L2TAGCTRL[%d]", reg_w, reg_id);
+	PMD_DRV_LOG(DEBUG,
+		    "Global register 0x%08x is changed with value 0x%08x",
+		    I40E_GL_SWT_L2TAGCTRL(reg_id), (uint32_t)reg_w);
+
+	i40e_global_cfg_warning(I40E_WARNING_TPID);
 
 	return ret;
 }
@@ -3025,19 +3127,25 @@
 		I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, mflcn_reg);
 	}
 
-	/* config the water marker both based on the packets and bytes */
-	I40E_WRITE_REG(hw, I40E_GLRPB_PHW,
-		       (pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
-		       << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
-	I40E_WRITE_REG(hw, I40E_GLRPB_PLW,
-		       (pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
-		       << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
-	I40E_WRITE_REG(hw, I40E_GLRPB_GHW,
-		       pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
-		       << I40E_KILOSHIFT);
-	I40E_WRITE_REG(hw, I40E_GLRPB_GLW,
-		       pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
-		       << I40E_KILOSHIFT);
+	if (!pf->support_multi_driver) {
+		/* config water marker both based on the packets and bytes */
+		I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PHW,
+				(pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
+				 << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
+		I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PLW,
+				(pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
+				 << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
+		I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GHW,
+				 pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
+				 << I40E_KILOSHIFT);
+		I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GLW,
+				  pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
+				  << I40E_KILOSHIFT);
+		i40e_global_cfg_warning(I40E_WARNING_FLOW_CTL);
+	} else {
+		PMD_DRV_LOG(ERR,
+			    "Water marker configuration is not supported.");
+	}
 
 	I40E_WRITE_FLUSH(hw);
 
@@ -4524,16 +4632,28 @@
 
 	/* VF has MSIX interrupt in VF range, don't allocate here */
 	if (type == I40E_VSI_MAIN) {
-		ret = i40e_res_pool_alloc(&pf->msix_pool,
-					  RTE_MIN(vsi->nb_qps,
-						  RTE_MAX_RXTX_INTR_VEC_ID));
-		if (ret < 0) {
-			PMD_DRV_LOG(ERR, "VSI MAIN %d get heap failed %d",
-				    vsi->seid, ret);
-			goto fail_queue_alloc;
+		if (pf->support_multi_driver) {
+			/* If support multi-driver, need to use INT0 instead of
+			 * allocating from msix pool. The Msix pool is init from
+			 * INT1, so it's OK just set msix_intr to 0 and nb_msix
+			 * to 1 without calling i40e_res_pool_alloc.
+			 */
+			vsi->msix_intr = 0;
+			vsi->nb_msix = 1;
+		} else {
+			ret = i40e_res_pool_alloc(&pf->msix_pool,
+						  RTE_MIN(vsi->nb_qps,
+						     RTE_MAX_RXTX_INTR_VEC_ID));
+			if (ret < 0) {
+				PMD_DRV_LOG(ERR,
+					    "VSI MAIN %d get heap failed %d",
+					    vsi->seid, ret);
+				goto fail_queue_alloc;
+			}
+			vsi->msix_intr = ret;
+			vsi->nb_msix = RTE_MIN(vsi->nb_qps,
+					       RTE_MAX_RXTX_INTR_VEC_ID);
 		}
-		vsi->msix_intr = ret;
-		vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
 	} else if (type != I40E_VSI_SRIOV) {
 		ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
 		if (ret < 0) {
@@ -4888,11 +5008,11 @@
 	int mask = 0;
 
 	/* Apply vlan offload setting */
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK;
+	mask = ETH_VLAN_STRIP_MASK |
+	       ETH_VLAN_FILTER_MASK |
+	       ETH_VLAN_EXTEND_MASK;
 	i40e_vlan_offload_set(dev, mask);
 
-	/* Apply double-vlan setting, not implemented yet */
-
 	/* Apply pvid setting */
 	ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
 				data->dev_conf.txmode.hw_vlan_insert_pvid);
@@ -5446,7 +5566,8 @@
 i40e_pf_disable_irq0(struct i40e_hw *hw)
 {
 	/* Disable all interrupt types */
-	I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
+	I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
+		       I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
 	I40E_WRITE_FLUSH(hw);
 }
 
@@ -6507,7 +6628,7 @@
 			uint8_t add)
 {
 	uint16_t ip_type;
-	uint32_t ipv4_addr;
+	uint32_t ipv4_addr, ipv4_addr_le;
 	uint8_t i, tun_type = 0;
 	/* internal varialbe to convert ipv6 byte order */
 	uint32_t convert_ipv6[4];
@@ -6534,8 +6655,9 @@
 	if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) {
 		ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
 		ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
+		ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr);
 		rte_memcpy(&pfilter->ipaddr.v4.data,
-				&rte_cpu_to_le_32(ipv4_addr),
+				&ipv4_addr_le,
 				sizeof(pfilter->ipaddr.v4.data));
 	} else {
 		ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
@@ -6855,9 +6977,15 @@
 static int
 i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
 {
+	struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
 	uint32_t val, reg;
 	int ret = -EINVAL;
 
+	if (pf->support_multi_driver) {
+		PMD_DRV_LOG(ERR, "GRE key length configuration is unsupported");
+		return -ENOTSUP;
+	}
+
 	val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2));
 	PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x\n", val);
 
@@ -6875,6 +7003,10 @@
 						   reg, NULL);
 		if (ret != 0)
 			return ret;
+		PMD_DRV_LOG(DEBUG, "Global register 0x%08x is changed "
+			    "with value 0x%08x",
+			    I40E_GL_PRS_FVBM(2), reg);
+		i40e_global_cfg_warning(I40E_WARNING_GRE_KEY_LEN);
 	} else {
 		ret = 0;
 	}
@@ -7095,12 +7227,18 @@
 i40e_set_hash_filter_global_config(struct i40e_hw *hw,
 				   struct rte_eth_hash_global_conf *g_cfg)
 {
+	struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
 	int ret;
 	uint16_t i;
 	uint32_t reg;
 	uint32_t mask0 = g_cfg->valid_bit_mask[0];
 	enum i40e_filter_pctype pctype;
 
+	if (pf->support_multi_driver) {
+		PMD_DRV_LOG(ERR, "Hash global configuration is not supported.");
+		return -ENOTSUP;
+	}
+
 	/* Check the input parameters */
 	ret = i40e_hash_global_config_check(g_cfg);
 	if (ret < 0)
@@ -7118,42 +7256,45 @@
 				I40E_GLQF_HSYM_SYMH_ENA_MASK : 0;
 		if (hw->mac.type == I40E_MAC_X722) {
 			if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP) {
-				i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+				i40e_write_global_rx_ctl(hw, I40E_GLQF_HSYM(
 				  I40E_FILTER_PCTYPE_NONF_IPV4_UDP), reg);
-				i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+				i40e_write_global_rx_ctl(hw, I40E_GLQF_HSYM(
 				  I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP),
 				  reg);
-				i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+				i40e_write_global_rx_ctl(hw, I40E_GLQF_HSYM(
 				  I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP),
 				  reg);
 			} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP) {
-				i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+				i40e_write_global_rx_ctl(hw, I40E_GLQF_HSYM(
 				  I40E_FILTER_PCTYPE_NONF_IPV4_TCP), reg);
-				i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+				i40e_write_global_rx_ctl(hw, I40E_GLQF_HSYM(
 				  I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK),
 				  reg);
 			} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP) {
-				i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+				i40e_write_global_rx_ctl(hw, I40E_GLQF_HSYM(
 				  I40E_FILTER_PCTYPE_NONF_IPV6_UDP), reg);
-				i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+				i40e_write_global_rx_ctl(hw, I40E_GLQF_HSYM(
 				  I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP),
 				  reg);
-				i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+				i40e_write_global_rx_ctl(hw, I40E_GLQF_HSYM(
 				  I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP),
 				  reg);
 			} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP) {
-				i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+				i40e_write_global_rx_ctl(hw, I40E_GLQF_HSYM(
 				  I40E_FILTER_PCTYPE_NONF_IPV6_TCP), reg);
-				i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+				i40e_write_global_rx_ctl(hw, I40E_GLQF_HSYM(
 				  I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK),
 				  reg);
 			} else {
-				i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(pctype),
-				  reg);
+				i40e_write_global_rx_ctl(hw,
+							 I40E_GLQF_HSYM(pctype),
+							 reg);
 			}
 		} else {
-			i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(pctype), reg);
+			i40e_write_global_rx_ctl(hw, I40E_GLQF_HSYM(pctype),
+						 reg);
 		}
+		i40e_global_cfg_warning(I40E_WARNING_HSYM);
 	}
 
 	reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
@@ -7177,7 +7318,8 @@
 		/* Use the default, and keep it as it is */
 		goto out;
 
-	i40e_write_rx_ctl(hw, I40E_GLQF_CTL, reg);
+	i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg);
+	i40e_global_cfg_warning(I40E_WARNING_QF_CTL);
 
 out:
 	I40E_WRITE_FLUSH(hw);
@@ -7791,6 +7933,18 @@
 }
 
 static void
+i40e_check_write_global_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
+{
+	uint32_t reg = i40e_read_rx_ctl(hw, addr);
+
+	PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg);
+	if (reg != val)
+		i40e_write_global_rx_ctl(hw, addr, val);
+	PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr,
+		    (uint32_t)i40e_read_rx_ctl(hw, addr));
+}
+
+static void
 i40e_filter_input_set_init(struct i40e_pf *pf)
 {
 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
@@ -7815,6 +7969,12 @@
 						   I40E_INSET_MASK_NUM_REG);
 		if (num < 0)
 			return;
+
+		if (pf->support_multi_driver && num > 0) {
+			PMD_DRV_LOG(ERR, "Input set setting is not supported.");
+			return;
+		}
+
 		inset_reg = i40e_translate_input_set_reg(hw->mac.type,
 					input_set);
 
@@ -7823,31 +7983,49 @@
 		i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
 				     (uint32_t)((inset_reg >>
 				     I40E_32_BIT_WIDTH) & UINT32_MAX));
-		i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
-				      (uint32_t)(inset_reg & UINT32_MAX));
-		i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
-				     (uint32_t)((inset_reg >>
-				     I40E_32_BIT_WIDTH) & UINT32_MAX));
-
-		for (i = 0; i < num; i++) {
-			i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
-					     mask_reg[i]);
-			i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
-					     mask_reg[i]);
-		}
-		/*clear unused mask registers of the pctype */
-		for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) {
-			i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
-					     0);
-			i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
-					     0);
+		if (!pf->support_multi_driver) {
+			i40e_check_write_global_reg(hw,
+					    I40E_GLQF_HASH_INSET(0, pctype),
+					    (uint32_t)(inset_reg & UINT32_MAX));
+			i40e_check_write_global_reg(hw,
+					    I40E_GLQF_HASH_INSET(1, pctype),
+					    (uint32_t)((inset_reg >>
+					    I40E_32_BIT_WIDTH) & UINT32_MAX));
+
+			for (i = 0; i < num; i++) {
+				i40e_check_write_global_reg(hw,
+						    I40E_GLQF_FD_MSK(i, pctype),
+						    mask_reg[i]);
+				i40e_check_write_global_reg(hw,
+						  I40E_GLQF_HASH_MSK(i, pctype),
+						  mask_reg[i]);
+			}
+			/*clear unused mask registers of the pctype */
+			for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) {
+				i40e_check_write_global_reg(hw,
+						    I40E_GLQF_FD_MSK(i, pctype),
+						    0);
+				i40e_check_write_global_reg(hw,
+						  I40E_GLQF_HASH_MSK(i, pctype),
+						    0);
+			}
+		} else {
+			PMD_DRV_LOG(ERR,
+				    "Input set setting is not supported.");
 		}
 		I40E_WRITE_FLUSH(hw);
 
 		/* store the default input set */
-		pf->hash_input_set[pctype] = input_set;
+		if (!pf->support_multi_driver)
+			pf->hash_input_set[pctype] = input_set;
 		pf->fdir.input_set[pctype] = input_set;
 	}
+
+	if (!pf->support_multi_driver) {
+		i40e_global_cfg_warning(I40E_WARNING_HASH_INSET);
+		i40e_global_cfg_warning(I40E_WARNING_FD_MSK);
+		i40e_global_cfg_warning(I40E_WARNING_HASH_MSK);
+	}
 }
 
 int
@@ -7860,6 +8038,11 @@
 	uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
 	int ret, i, num;
 
+	if (pf->support_multi_driver) {
+		PMD_DRV_LOG(ERR, "Hash input set setting is not supported.");
+		return -ENOTSUP;
+	}
+
 	if (!conf) {
 		PMD_DRV_LOG(ERR, "Invalid pointer");
 		return -EFAULT;
@@ -7908,19 +8091,21 @@
 
 	inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
 
-	i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
-			      (uint32_t)(inset_reg & UINT32_MAX));
-	i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
-			     (uint32_t)((inset_reg >>
-			     I40E_32_BIT_WIDTH) & UINT32_MAX));
+	i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
+				    (uint32_t)(inset_reg & UINT32_MAX));
+	i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
+				    (uint32_t)((inset_reg >>
+				    I40E_32_BIT_WIDTH) & UINT32_MAX));
+	i40e_global_cfg_warning(I40E_WARNING_HASH_INSET);
 
 	for (i = 0; i < num; i++)
-		i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
-				     mask_reg[i]);
+		i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
+					    mask_reg[i]);
 	/*clear unused mask registers of the pctype */
 	for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
-		i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
-				     0);
+		i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
+					    0);
+	i40e_global_cfg_warning(I40E_WARNING_HASH_MSK);
 	I40E_WRITE_FLUSH(hw);
 
 	pf->hash_input_set[pctype] = input_set;
@@ -7984,6 +8169,11 @@
 	if (num < 0)
 		return -EINVAL;
 
+	if (pf->support_multi_driver && num > 0) {
+		PMD_DRV_LOG(ERR, "FDIR bit mask is not supported.");
+		return -ENOTSUP;
+	}
+
 	inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
 
 	i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
@@ -7992,13 +8182,20 @@
 			     (uint32_t)((inset_reg >>
 			     I40E_32_BIT_WIDTH) & UINT32_MAX));
 
-	for (i = 0; i < num; i++)
-		i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
-				     mask_reg[i]);
-	/*clear unused mask registers of the pctype */
-	for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
-		i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
-				     0);
+	if (!pf->support_multi_driver) {
+		for (i = 0; i < num; i++)
+			i40e_check_write_global_reg(hw,
+						    I40E_GLQF_FD_MSK(i, pctype),
+						    mask_reg[i]);
+		/*clear unused mask registers of the pctype */
+		for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
+			i40e_check_write_global_reg(hw,
+						    I40E_GLQF_FD_MSK(i, pctype),
+						    0);
+		i40e_global_cfg_warning(I40E_WARNING_FD_MSK);
+	} else {
+		PMD_DRV_LOG(ERR, "FDIR bit mask is not supported.");
+	}
 	I40E_WRITE_FLUSH(hw);
 
 	pf->fdir.input_set[pctype] = input_set;
@@ -9694,27 +9891,21 @@
 {
 	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	uint16_t interval =
-		i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
 	uint16_t msix_intr;
 
 	msix_intr = intr_handle->intr_vec[queue_id];
 	if (msix_intr == I40E_MISC_VEC_ID)
 		I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
-			       I40E_PFINT_DYN_CTLN_INTENA_MASK |
-			       I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
-			       (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
-			       (interval <<
-				I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
+			       I40E_PFINT_DYN_CTL0_INTENA_MASK |
+			       I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
+			       I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
 	else
 		I40E_WRITE_REG(hw,
 			       I40E_PFINT_DYN_CTLN(msix_intr -
 						   I40E_RX_VEC_START),
 			       I40E_PFINT_DYN_CTLN_INTENA_MASK |
 			       I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
-			       (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
-			       (interval <<
-				I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
+			       I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
 
 	I40E_WRITE_FLUSH(hw);
 	rte_intr_enable(&dev->pci_dev->intr_handle);
@@ -9731,12 +9922,13 @@
 
 	msix_intr = intr_handle->intr_vec[queue_id];
 	if (msix_intr == I40E_MISC_VEC_ID)
-		I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
+		I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
+			       I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
 	else
 		I40E_WRITE_REG(hw,
 			       I40E_PFINT_DYN_CTLN(msix_intr -
 						   I40E_RX_VEC_START),
-			       0);
+			       I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
 	I40E_WRITE_FLUSH(hw);
 
 	return 0;
@@ -9832,14 +10024,43 @@
 				      struct ether_addr *mac_addr)
 {
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct i40e_vsi *vsi = pf->main_vsi;
+	struct i40e_mac_filter_info mac_filter;
+	struct i40e_mac_filter *f;
+	int ret;
 
 	if (!is_valid_assigned_ether_addr(mac_addr)) {
 		PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
 		return;
 	}
 
-	/* Flags: 0x3 updates port address */
-	i40e_aq_mac_address_write(hw, 0x3, mac_addr->addr_bytes, NULL);
+	TAILQ_FOREACH(f, &vsi->mac_list, next) {
+		if (is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr))
+			break;
+	}
+
+	if (f == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
+		return;
+	}
+
+	mac_filter = f->mac_info;
+	ret = i40e_vsi_delete_mac(vsi, &mac_filter.mac_addr);
+	if (ret != I40E_SUCCESS) {
+		PMD_DRV_LOG(ERR, "Failed to delete mac filter");
+		return;
+	}
+	memcpy(&mac_filter.mac_addr, mac_addr, ETH_ADDR_LEN);
+	ret = i40e_vsi_add_mac(vsi, &mac_filter);
+	if (ret != I40E_SUCCESS) {
+		PMD_DRV_LOG(ERR, "Failed to add mac filter");
+		return;
+	}
+	memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN);
+
+	i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
+				  mac_addr->addr_bytes, NULL);
 }
 
 static int
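
The recurring pattern in the i40e_ethdev.c hunks above: writes to device-global GLQF_* registers are skipped entirely when support_multi_driver is set, and otherwise go through a helper that reads the register, writes only if the value differs, and logs the change. A minimal standalone sketch of that guard, assuming stubbed register accessors (reg_read/reg_write and the tiny register map are illustrative, not DPDK API):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Illustrative stand-ins for the PMD's register accessors. */
static uint32_t regs[16];

static uint32_t reg_read(uint32_t addr) { return regs[addr & 0xf]; }
static void reg_write(uint32_t addr, uint32_t val) { regs[addr & 0xf] = val; }

/* Write a device-global register only when the value actually changes,
 * and never when several drivers may share the device. */
static void
check_write_global(bool support_multi_driver, uint32_t addr, uint32_t val)
{
	uint32_t cur = reg_read(addr);

	if (support_multi_driver) {
		printf("global reg 0x%02x left untouched (shared device)\n",
		       (unsigned)addr);
		return;
	}
	if (cur != val) {
		reg_write(addr, val);
		printf("global reg 0x%02x: 0x%08x -> 0x%08x\n",
		       (unsigned)addr, (unsigned)cur, (unsigned)val);
	}
}

int main(void)
{
	check_write_global(false, 0x3, 0xdeadbeef); /* written */
	check_write_global(false, 0x3, 0xdeadbeef); /* unchanged, skipped */
	check_write_global(true, 0x4, 0x1);         /* skipped, shared device */
	return 0;
}
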
diff -Nru dpdk-16.11.4/drivers/net/i40e/i40e_ethdev.h dpdk-16.11.6/drivers/net/i40e/i40e_ethdev.h
--- dpdk-16.11.4/drivers/net/i40e/i40e_ethdev.h	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/i40e/i40e_ethdev.h	2018-04-19 15:01:06.000000000 +0100
@@ -103,6 +103,14 @@
 	(((vf)->version_major == I40E_VIRTCHNL_VERSION_MAJOR) && \
 	((vf)->version_minor == 1))
 
+static inline void
+I40E_WRITE_GLB_REG(struct i40e_hw *hw, uint32_t reg, uint32_t value) {
+	I40E_WRITE_REG(hw, reg, value);
+	PMD_DRV_LOG(DEBUG, "Global register 0x%08x is modified "
+		    "with value 0x%08x",
+		    reg, value);
+}
+
 /* index flex payload per layer */
 enum i40e_flxpld_layer_idx {
 	I40E_FLXPLD_L2_IDX    = 0,
@@ -477,6 +485,8 @@
 	bool floating_veb; /* The flag to use the floating VEB */
 	/* The floating enable flag for the specific VF */
 	bool floating_veb_list[I40E_MAX_VF];
+
+	bool support_multi_driver; /* 1 - support multiple driver */
 };
 
 enum pending_msg {
@@ -569,6 +579,22 @@
 	struct rte_timecounter tx_tstamp_tc;
 };
 
+enum I40E_WARNING_IDX {
+	I40E_WARNING_DIS_FLX_PLD,
+	I40E_WARNING_ENA_FLX_PLD,
+	I40E_WARNING_QINQ_PARSER,
+	I40E_WARNING_QINQ_CLOUD_FILTER,
+	I40E_WARNING_TPID,
+	I40E_WARNING_FLOW_CTL,
+	I40E_WARNING_GRE_KEY_LEN,
+	I40E_WARNING_QF_CTL,
+	I40E_WARNING_HASH_INSET,
+	I40E_WARNING_HSYM,
+	I40E_WARNING_HASH_MSK,
+	I40E_WARNING_FD_MSK,
+	I40E_WARNING_RPL_CLD_FILTER,
+};
+
 int i40e_dev_switch_queues(struct i40e_pf *pf, bool on);
 int i40e_vsi_release(struct i40e_vsi *vsi);
 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf,
@@ -694,15 +720,46 @@
 }
 
 static inline uint16_t
-i40e_calc_itr_interval(int16_t interval)
+i40e_calc_itr_interval(int16_t interval, bool is_multi_drv)
 {
-	if (interval < 0 || interval > I40E_QUEUE_ITR_INTERVAL_MAX)
-		interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT;
+	if (interval < 0 || interval > I40E_QUEUE_ITR_INTERVAL_MAX) {
+		if (is_multi_drv)
+			interval = I40E_QUEUE_ITR_INTERVAL_MAX;
+		else
+			interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT;
+	}
 
 	/* Convert to hardware count, as writing each 1 represents 2 us */
 	return interval / 2;
 }
 
+static inline void
+i40e_global_cfg_warning(enum I40E_WARNING_IDX idx)
+{
+	const char *warning;
+	static const char *const warning_list[] = {
+		[I40E_WARNING_DIS_FLX_PLD] = "disable FDIR flexible payload",
+		[I40E_WARNING_ENA_FLX_PLD] = "enable FDIR flexible payload",
+		[I40E_WARNING_QINQ_PARSER] = "support QinQ parser",
+		[I40E_WARNING_QINQ_CLOUD_FILTER] = "support QinQ cloud filter",
+		[I40E_WARNING_TPID] = "support TPID configuration",
+		[I40E_WARNING_FLOW_CTL] = "configure water marker",
+		[I40E_WARNING_GRE_KEY_LEN] = "support GRE key length setting",
+		[I40E_WARNING_QF_CTL] = "support hash function setting",
+		[I40E_WARNING_HASH_INSET] = "configure hash input set",
+		[I40E_WARNING_HSYM] = "set symmetric hash",
+		[I40E_WARNING_HASH_MSK] = "configure hash mask",
+		[I40E_WARNING_FD_MSK] = "configure fdir mask",
+		[I40E_WARNING_RPL_CLD_FILTER] = "replace cloud filter",
+	};
+
+	warning = warning_list[idx];
+
+	RTE_LOG(WARNING, PMD,
+		"Global register is changed during %s\n",
+		warning);
+}
+
 #define I40E_VALID_FLOW(flow_type) \
 	((flow_type) == RTE_ETH_FLOW_FRAG_IPV4 || \
 	(flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_TCP || \
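
The header hunk above reworks i40e_calc_itr_interval() so that an out-of-range interval falls back to the maximum when multiple drivers share the device and to the usual default otherwise, then converts microseconds into the hardware's 2 us granularity. The same arithmetic in isolation (the _MAX/_DEFAULT values below are placeholders, not the driver's real constants):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Placeholder limits; the PMD defines its own values. */
#define ITR_INTERVAL_MAX      8160
#define ITR_INTERVAL_DEFAULT  32

static uint16_t
calc_itr_interval(int16_t interval, bool is_multi_drv)
{
	if (interval < 0 || interval > ITR_INTERVAL_MAX)
		interval = is_multi_drv ? ITR_INTERVAL_MAX
					: ITR_INTERVAL_DEFAULT;

	/* Each hardware count represents 2 microseconds. */
	return (uint16_t)(interval / 2);
}

int main(void)
{
	printf("%u\n", (unsigned)calc_itr_interval(-1, false)); /* 16   */
	printf("%u\n", (unsigned)calc_itr_interval(-1, true));  /* 4080 */
	printf("%u\n", (unsigned)calc_itr_interval(50, false)); /* 25   */
	return 0;
}
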
diff -Nru dpdk-16.11.4/drivers/net/i40e/i40e_ethdev_vf.c dpdk-16.11.6/drivers/net/i40e/i40e_ethdev_vf.c
--- dpdk-16.11.4/drivers/net/i40e/i40e_ethdev_vf.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/i40e/i40e_ethdev_vf.c	2018-04-19 15:01:06.000000000 +0100
@@ -1035,14 +1035,16 @@
 static void
 i40evf_dev_xstats_reset(struct rte_eth_dev *dev)
 {
+	int ret;
 	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	struct i40e_eth_stats *pstats = NULL;
 
 	/* read stat values to clear hardware registers */
-	i40evf_update_stats(dev, &pstats);
+	ret = i40evf_update_stats(dev, &pstats);
 
 	/* set stats offset base on current values */
-	vf->vsi.eth_stats_offset = *pstats;
+	if (ret == 0)
+		vf->vsi.eth_stats_offset = *pstats;
 }
 
 static int i40evf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
@@ -1246,7 +1248,7 @@
 	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	struct ether_addr *p_mac_addr;
 	uint16_t interval =
-		i40e_calc_itr_interval(I40E_QUEUE_ITR_INTERVAL_MAX);
+		i40e_calc_itr_interval(I40E_QUEUE_ITR_INTERVAL_MAX, 0);
 
 	vf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	vf->dev_data = dev->data;
@@ -1986,7 +1988,7 @@
 	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	uint16_t interval =
-		i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
+		i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL, 0);
 	uint16_t msix_intr;
 
 	msix_intr = intr_handle->intr_vec[queue_id];
@@ -2113,7 +2115,8 @@
 					dev->data->nb_tx_queues);
 
 	/* check and configure queue intr-vector mapping */
-	if (dev->data->dev_conf.intr_conf.rxq != 0) {
+	if (rte_intr_cap_multiple(intr_handle) &&
+	    dev->data->dev_conf.intr_conf.rxq) {
 		intr_vector = dev->data->nb_rx_queues;
 		if (rte_intr_efd_enable(intr_handle, intr_vector))
 			return -1;
diff -Nru dpdk-16.11.4/drivers/net/i40e/i40e_fdir.c dpdk-16.11.6/drivers/net/i40e/i40e_fdir.c
--- dpdk-16.11.4/drivers/net/i40e/i40e_fdir.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/i40e/i40e_fdir.c	2018-04-19 15:01:06.000000000 +0100
@@ -165,7 +165,6 @@
 
 	rte_wmb();
 	/* Init the RX tail regieter. */
-	I40E_PCI_REG_WRITE(rxq->qrx_tail, 0);
 	I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
 
 	return err;
@@ -1011,13 +1010,18 @@
 				PMD_DRV_LOG(ERR, "invalid programming status"
 					    " reported, error = %u.", error);
 		} else
-			PMD_DRV_LOG(ERR, "unknown programming status"
+			PMD_DRV_LOG(INFO, "unknown programming status"
 				    " reported, len = %d, id = %u.", len, id);
 		rxdp->wb.qword1.status_error_len = 0;
 		rxq->rx_tail++;
 		if (unlikely(rxq->rx_tail == rxq->nb_rx_desc))
 			rxq->rx_tail = 0;
+		if (rxq->rx_tail == 0)
+			I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+		else
+			I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_tail - 1);
 	}
+
 	return ret;
 }
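
The i40e_fdir.c hunk above makes the flow-director RX ring return descriptors to hardware as they are consumed: the tail register is programmed to the slot just behind the software tail, wrapping to the last descriptor when the software tail has wrapped to 0. The ring arithmetic on its own (names and ring size are illustrative):

#include <stdio.h>
#include <stdint.h>

/* Slot to program into the RX tail register once rx_tail has advanced:
 * the descriptor just behind the software tail, with wraparound. */
static uint16_t
hw_tail_after_consume(uint16_t rx_tail, uint16_t nb_rx_desc)
{
	return rx_tail == 0 ? (uint16_t)(nb_rx_desc - 1)
			    : (uint16_t)(rx_tail - 1);
}

int main(void)
{
	printf("%u\n", (unsigned)hw_tail_after_consume(5, 64)); /* 4  */
	printf("%u\n", (unsigned)hw_tail_after_consume(0, 64)); /* 63 */
	return 0;
}
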
 
diff -Nru dpdk-16.11.4/drivers/net/i40e/i40e_rxtx.c dpdk-16.11.6/drivers/net/i40e/i40e_rxtx.c
--- dpdk-16.11.4/drivers/net/i40e/i40e_rxtx.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/i40e/i40e_rxtx.c	2018-04-19 15:01:06.000000000 +0100
@@ -2606,6 +2606,7 @@
 	rxq->vsi = pf->fdir.fdir_vsi;
 
 	rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
+	memset(rz->addr, 0, I40E_FDIR_NUM_RX_DESC * sizeof(union i40e_rx_desc));
 	rxq->rx_ring = (union i40e_rx_desc *)rz->addr;
 
 	/*
diff -Nru dpdk-16.11.4/drivers/net/i40e/i40e_rxtx_vec_altivec.c dpdk-16.11.6/drivers/net/i40e/i40e_rxtx_vec_altivec.c
--- dpdk-16.11.4/drivers/net/i40e/i40e_rxtx_vec_altivec.c	1970-01-01 01:00:00.000000000 +0100
+++ dpdk-16.11.6/drivers/net/i40e/i40e_rxtx_vec_altivec.c	2018-04-19 15:01:06.000000000 +0100
@@ -0,0 +1,654 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2017 IBM Corporation.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+
+#include "base/i40e_prototype.h"
+#include "base/i40e_type.h"
+#include "i40e_ethdev.h"
+#include "i40e_rxtx.h"
+#include "i40e_rxtx_vec_common.h"
+
+#include <altivec.h>
+
+#pragma GCC diagnostic ignored "-Wcast-qual"
+
+static inline void
+i40e_rxq_rearm(struct i40e_rx_queue *rxq)
+{
+	int i;
+	uint16_t rx_id;
+	volatile union i40e_rx_desc *rxdp;
+
+	struct i40e_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
+	struct rte_mbuf *mb0, *mb1;
+
+	vector unsigned long hdr_room = (vector unsigned long){
+						RTE_PKTMBUF_HEADROOM,
+						RTE_PKTMBUF_HEADROOM};
+	vector unsigned long dma_addr0, dma_addr1;
+
+	rxdp = rxq->rx_ring + rxq->rxrearm_start;
+
+	/* Pull 'n' more MBUFs into the software ring */
+	if (rte_mempool_get_bulk(rxq->mp,
+				 (void *)rxep,
+				 RTE_I40E_RXQ_REARM_THRESH) < 0) {
+		if (rxq->rxrearm_nb + RTE_I40E_RXQ_REARM_THRESH >=
+		    rxq->nb_rx_desc) {
+			dma_addr0 = (vector unsigned long){};
+			for (i = 0; i < RTE_I40E_DESCS_PER_LOOP; i++) {
+				rxep[i].mbuf = &rxq->fake_mbuf;
+				vec_st(dma_addr0, 0,
+				       (vector unsigned long *)&rxdp[i].read);
+			}
+		}
+		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
+			RTE_I40E_RXQ_REARM_THRESH;
+		return;
+	}
+
+	/* Initialize the mbufs in vector, process 2 mbufs in one loop */
+	for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; i += 2, rxep += 2) {
+		vector unsigned long vaddr0, vaddr1;
+		uintptr_t p0, p1;
+
+		mb0 = rxep[0].mbuf;
+		mb1 = rxep[1].mbuf;
+
+		 /* Flush mbuf with pkt template.
+		  * Data to be rearmed is 6 bytes long.
+		  * Though, RX will overwrite ol_flags that are coming next
+		  * anyway. So overwrite whole 8 bytes with one load:
+		  * 6 bytes of rearm_data plus first 2 bytes of ol_flags.
+		  */
+		p0 = (uintptr_t)&mb0->rearm_data;
+		*(uint64_t *)p0 = rxq->mbuf_initializer;
+		p1 = (uintptr_t)&mb1->rearm_data;
+		*(uint64_t *)p1 = rxq->mbuf_initializer;
+
+		/* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
+		vaddr0 = vec_ld(0, (vector unsigned long *)&mb0->buf_addr);
+		vaddr1 = vec_ld(0, (vector unsigned long *)&mb1->buf_addr);
+
+		/* convert pa to dma_addr hdr/data */
+		dma_addr0 = vec_mergel(vaddr0, vaddr0);
+		dma_addr1 = vec_mergel(vaddr1, vaddr1);
+
+		/* add headroom to pa values */
+		dma_addr0 = vec_add(dma_addr0, hdr_room);
+		dma_addr1 = vec_add(dma_addr1, hdr_room);
+
+		/* flush desc with pa dma_addr */
+		vec_st(dma_addr0, 0, (vector unsigned long *)&rxdp++->read);
+		vec_st(dma_addr1, 0, (vector unsigned long *)&rxdp++->read);
+	}
+
+	rxq->rxrearm_start += RTE_I40E_RXQ_REARM_THRESH;
+	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
+		rxq->rxrearm_start = 0;
+
+	rxq->rxrearm_nb -= RTE_I40E_RXQ_REARM_THRESH;
+
+	rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
+			     (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
+
+	/* Update the tail pointer on the NIC */
+	I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+}
+
+/* Handling the offload flags (olflags) field takes computation
+ * time when receiving packets. Therefore we provide a flag to disable
+ * the processing of the olflags field when they are not needed. This
+ * gives improved performance, at the cost of losing the offload info
+ * in the received packet
+ */
+#ifdef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE
+
+static inline void
+desc_to_olflags_v(vector unsigned long descs[4], struct rte_mbuf **rx_pkts)
+{
+	vector unsigned int vlan0, vlan1, rss, l3_l4e;
+
+	/* mask everything except RSS, flow director and VLAN flags
+	 * bit2 is for VLAN tag, bit11 for flow director indication
+	 * bit13:12 for RSS indication.
+	 */
+	const vector unsigned int rss_vlan_msk = (vector unsigned int){
+			(int32_t)0x1c03804, (int32_t)0x1c03804,
+			(int32_t)0x1c03804, (int32_t)0x1c03804};
+
+	/* map rss and vlan type to rss hash and vlan flag */
+	const vector unsigned char vlan_flags = (vector unsigned char){
+			0, 0, 0, 0,
+			PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED, 0, 0, 0,
+			0, 0, 0, 0,
+			0, 0, 0, 0};
+
+	const vector unsigned char rss_flags = (vector unsigned char){
+			0, PKT_RX_FDIR, 0, 0,
+			0, 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH | PKT_RX_FDIR,
+			0, 0, 0, 0,
+			0, 0, 0, 0};
+
+	const vector unsigned char l3_l4e_flags = (vector unsigned char){
+			0,
+			PKT_RX_IP_CKSUM_BAD,
+			PKT_RX_L4_CKSUM_BAD,
+			PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
+			PKT_RX_EIP_CKSUM_BAD,
+			PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
+			PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD,
+			PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
+					     | PKT_RX_IP_CKSUM_BAD,
+			0, 0, 0, 0, 0, 0, 0, 0};
+
+	vlan0 = (vector unsigned int)vec_mergel(descs[0], descs[1]);
+	vlan1 = (vector unsigned int)vec_mergel(descs[2], descs[3]);
+	vlan0 = (vector unsigned int)vec_mergeh(vlan0, vlan1);
+
+	vlan1 = vec_and(vlan0, rss_vlan_msk);
+	vlan0 = (vector unsigned int)vec_perm(vlan_flags,
+					(vector unsigned char){},
+					*(vector unsigned char *)&vlan1);
+
+	rss = vec_sr(vlan1, (vector unsigned int){11, 11, 11, 11});
+	rss = (vector unsigned int)vec_perm(rss_flags, (vector unsigned char){},
+					*(vector unsigned char *)&rss);
+
+	l3_l4e = vec_sr(vlan1, (vector unsigned int){22, 22, 22, 22});
+	l3_l4e = (vector unsigned int)vec_perm(l3_l4e_flags,
+					(vector unsigned char){},
+					*(vector unsigned char *)&l3_l4e);
+
+	vlan0 = vec_or(vlan0, rss);
+	vlan0 = vec_or(vlan0, l3_l4e);
+
+	rx_pkts[0]->ol_flags = (uint64_t)vlan0[2];
+	rx_pkts[1]->ol_flags = (uint64_t)vlan0[3];
+	rx_pkts[2]->ol_flags = (uint64_t)vlan0[0];
+	rx_pkts[3]->ol_flags = (uint64_t)vlan0[1];
+}
+#else
+#define desc_to_olflags_v(desc, rx_pkts) do {} while (0)
+#endif
+
+#define PKTLEN_SHIFT     10
+
+static inline void
+desc_to_ptype_v(vector unsigned long descs[4], struct rte_mbuf **rx_pkts)
+{
+	vector unsigned long ptype0 = vec_mergel(descs[0], descs[1]);
+	vector unsigned long ptype1 = vec_mergel(descs[2], descs[3]);
+
+	ptype0 = vec_sr(ptype0, (vector unsigned long){30, 30});
+	ptype1 = vec_sr(ptype1, (vector unsigned long){30, 30});
+
+	rx_pkts[0]->packet_type = i40e_rxd_pkt_type_mapping(
+					(*(vector unsigned char *)&ptype0)[0]);
+	rx_pkts[1]->packet_type = i40e_rxd_pkt_type_mapping(
+					(*(vector unsigned char *)&ptype0)[8]);
+	rx_pkts[2]->packet_type = i40e_rxd_pkt_type_mapping(
+					(*(vector unsigned char *)&ptype1)[0]);
+	rx_pkts[3]->packet_type = i40e_rxd_pkt_type_mapping(
+					(*(vector unsigned char *)&ptype1)[8]);
+}
+
+ /* Notice:
+  * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
+  * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
+  *   numbers of DD bits
+  */
+static inline uint16_t
+_recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+		   uint16_t nb_pkts, uint8_t *split_packet)
+{
+	volatile union i40e_rx_desc *rxdp;
+	struct i40e_rx_entry *sw_ring;
+	uint16_t nb_pkts_recd;
+	int pos;
+	uint64_t var;
+	vector unsigned char shuf_msk;
+
+	vector unsigned short crc_adjust = (vector unsigned short){
+		0, 0,         /* ignore pkt_type field */
+		rxq->crc_len, /* sub crc on pkt_len */
+		0,            /* ignore high-16bits of pkt_len */
+		rxq->crc_len, /* sub crc on data_len */
+		0, 0, 0       /* ignore non-length fields */
+		};
+	vector unsigned long dd_check, eop_check;
+
+	/* nb_pkts shall be less equal than RTE_I40E_MAX_RX_BURST */
+	nb_pkts = RTE_MIN(nb_pkts, RTE_I40E_MAX_RX_BURST);
+
+	/* nb_pkts has to be floor-aligned to RTE_I40E_DESCS_PER_LOOP */
+	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_I40E_DESCS_PER_LOOP);
+
+	/* Just the act of getting into the function from the application is
+	 * going to cost about 7 cycles
+	 */
+	rxdp = rxq->rx_ring + rxq->rx_tail;
+
+	rte_prefetch0(rxdp);
+
+	/* See if we need to rearm the RX queue - gives the prefetch a bit
+	 * of time to act
+	 */
+	if (rxq->rxrearm_nb > RTE_I40E_RXQ_REARM_THRESH)
+		i40e_rxq_rearm(rxq);
+
+	/* Before we start moving massive data around, check to see if
+	 * there is actually a packet available
+	 */
+	if (!(rxdp->wb.qword1.status_error_len &
+			rte_cpu_to_le_32(1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+		return 0;
+
+	/* 4 packets DD mask */
+	dd_check = (vector unsigned long){0x0000000100000001ULL,
+					  0x0000000100000001ULL};
+
+	/* 4 packets EOP mask */
+	eop_check = (vector unsigned long){0x0000000200000002ULL,
+					   0x0000000200000002ULL};
+
+	/* mask to shuffle from desc. to mbuf */
+	shuf_msk = (vector unsigned char){
+		0xFF, 0xFF,   /* pkt_type set as unknown */
+		0xFF, 0xFF,   /* pkt_type set as unknown */
+		14, 15,       /* octet 15~14, low 16 bits pkt_len */
+		0xFF, 0xFF,   /* skip high 16 bits pkt_len, zero out */
+		14, 15,       /* octet 15~14, 16 bits data_len */
+		2, 3,         /* octet 2~3, low 16 bits vlan_macip */
+		4, 5, 6, 7    /* octet 4~7, 32bits rss */
+		};
+
+	/* Cache is empty -> need to scan the buffer rings, but first move
+	 * the next 'n' mbufs into the cache
+	 */
+	sw_ring = &rxq->sw_ring[rxq->rx_tail];
+
+	/* A. load 4 packet in one loop
+	 * [A*. mask out 4 unused dirty field in desc]
+	 * B. copy 4 mbuf point from swring to rx_pkts
+	 * C. calc the number of DD bits among the 4 packets
+	 * [C*. extract the end-of-packet bit, if requested]
+	 * D. fill info. from desc to mbuf
+	 */
+
+	for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
+			pos += RTE_I40E_DESCS_PER_LOOP,
+			rxdp += RTE_I40E_DESCS_PER_LOOP) {
+		vector unsigned long descs[RTE_I40E_DESCS_PER_LOOP];
+		vector unsigned char pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
+		vector unsigned short staterr, sterr_tmp1, sterr_tmp2;
+		vector unsigned long mbp1, mbp2; /* two mbuf pointer
+						  * in one XMM reg.
+						  */
+
+		/* B.1 load 1 mbuf point */
+		mbp1 = *(vector unsigned long *)&sw_ring[pos];
+		/* Read desc statuses backwards to avoid race condition */
+		/* A.1 load 4 pkts desc */
+		descs[3] = *(vector unsigned long *)(rxdp + 3);
+		rte_compiler_barrier();
+
+		/* B.2 copy 2 mbuf point into rx_pkts  */
+		*(vector unsigned long *)&rx_pkts[pos] = mbp1;
+
+		/* B.1 load 1 mbuf point */
+		mbp2 = *(vector unsigned long *)&sw_ring[pos + 2];
+
+		descs[2] = *(vector unsigned long *)(rxdp + 2);
+		rte_compiler_barrier();
+		/* B.1 load 2 mbuf point */
+		descs[1] = *(vector unsigned long *)(rxdp + 1);
+		rte_compiler_barrier();
+		descs[0] = *(vector unsigned long *)(rxdp);
+
+		/* B.2 copy 2 mbuf point into rx_pkts  */
+		*(vector unsigned long *)&rx_pkts[pos + 2] =  mbp2;
+
+		if (split_packet) {
+			rte_mbuf_prefetch_part2(rx_pkts[pos]);
+			rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
+			rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
+			rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
+		}
+
+		/* avoid compiler reorder optimization */
+		rte_compiler_barrier();
+
+		/* pkt 3,4 shift the pktlen field to be 16-bit aligned*/
+		const vector unsigned int len3 = vec_sl(
+			vec_ld(0, (vector unsigned int *)&descs[3]),
+			(vector unsigned int){0, 0, 0, PKTLEN_SHIFT});
+
+		const vector unsigned int len2 = vec_sl(
+			vec_ld(0, (vector unsigned int *)&descs[2]),
+			(vector unsigned int){0, 0, 0, PKTLEN_SHIFT});
+
+		/* merge the now-aligned packet length fields back in */
+		descs[3] = (vector unsigned long)len3;
+		descs[2] = (vector unsigned long)len2;
+
+		/* D.1 pkt 3,4 convert format from desc to pktmbuf */
+		pkt_mb4 = vec_perm((vector unsigned char)descs[3],
+				  (vector unsigned char){}, shuf_msk);
+		pkt_mb3 = vec_perm((vector unsigned char)descs[2],
+				  (vector unsigned char){}, shuf_msk);
+
+		/* C.1 4=>2 filter staterr info only */
+		sterr_tmp2 = vec_mergel((vector unsigned short)descs[3],
+					(vector unsigned short)descs[2]);
+		/* C.1 4=>2 filter staterr info only */
+		sterr_tmp1 = vec_mergel((vector unsigned short)descs[1],
+					(vector unsigned short)descs[0]);
+		/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
+		pkt_mb4 = (vector unsigned char)vec_sub(
+				(vector unsigned short)pkt_mb4, crc_adjust);
+		pkt_mb3 = (vector unsigned char)vec_sub(
+				(vector unsigned short)pkt_mb3, crc_adjust);
+
+		/* pkt 1,2 shift the pktlen field to be 16-bit aligned*/
+		const vector unsigned int len1 = vec_sl(
+			vec_ld(0, (vector unsigned int *)&descs[1]),
+			(vector unsigned int){0, 0, 0, PKTLEN_SHIFT});
+		const vector unsigned int len0 = vec_sl(
+			vec_ld(0, (vector unsigned int *)&descs[0]),
+			(vector unsigned int){0, 0, 0, PKTLEN_SHIFT});
+
+		/* merge the now-aligned packet length fields back in */
+		descs[1] = (vector unsigned long)len1;
+		descs[0] = (vector unsigned long)len0;
+
+		/* D.1 pkt 1,2 convert format from desc to pktmbuf */
+		pkt_mb2 = vec_perm((vector unsigned char)descs[1],
+				   (vector unsigned char){}, shuf_msk);
+		pkt_mb1 = vec_perm((vector unsigned char)descs[0],
+				   (vector unsigned char){}, shuf_msk);
+
+		/* C.2 get 4 pkts staterr value  */
+		staterr = (vector unsigned short)vec_mergeh(
+				sterr_tmp1, sterr_tmp2);
+
+		/* D.3 copy final 3,4 data to rx_pkts */
+		vec_st(pkt_mb4, 0,
+		 (vector unsigned char *)&rx_pkts[pos + 3]
+			->rx_descriptor_fields1
+		);
+		vec_st(pkt_mb3, 0,
+		 (vector unsigned char *)&rx_pkts[pos + 2]
+			->rx_descriptor_fields1
+		);
+
+		/* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
+		pkt_mb2 = (vector unsigned char)vec_sub(
+				(vector unsigned short)pkt_mb2, crc_adjust);
+		pkt_mb1 = (vector unsigned char)vec_sub(
+				(vector unsigned short)pkt_mb1,	crc_adjust);
+
+		/* C* extract and record EOP bit */
+		if (split_packet) {
+			vector unsigned char eop_shuf_mask =
+				(vector unsigned char){
+					0xFF, 0xFF, 0xFF, 0xFF,
+					0xFF, 0xFF, 0xFF, 0xFF,
+					0xFF, 0xFF, 0xFF, 0xFF,
+					0x04, 0x0C, 0x00, 0x08
+				};
+
+			/* and with mask to extract bits, flipping 1-0 */
+			vector unsigned char eop_bits = vec_and(
+				(vector unsigned char)vec_nor(staterr, staterr),
+				(vector unsigned char)eop_check);
+			/* the staterr values are not in order, as the count
+			 * of dd bits doesn't care. However, for end of
+			 * packet tracking, we do care, so shuffle. This also
+			 * compresses the 32-bit values to 8-bit
+			 */
+			eop_bits = vec_perm(eop_bits, (vector unsigned char){},
+					    eop_shuf_mask);
+			/* store the resulting 32-bit value */
+			*split_packet = (vec_ld(0,
+					 (vector unsigned int *)&eop_bits))[0];
+			split_packet += RTE_I40E_DESCS_PER_LOOP;
+
+			/* zero-out next pointers */
+			rx_pkts[pos]->next = NULL;
+			rx_pkts[pos + 1]->next = NULL;
+			rx_pkts[pos + 2]->next = NULL;
+			rx_pkts[pos + 3]->next = NULL;
+		}
+
+		/* C.3 calc available number of desc */
+		staterr = vec_and(staterr, (vector unsigned short)dd_check);
+
+		/* D.3 copy final 1,2 data to rx_pkts */
+		vec_st(pkt_mb2, 0,
+		 (vector unsigned char *)&rx_pkts[pos + 1]
+			->rx_descriptor_fields1
+		);
+		vec_st(pkt_mb1, 0,
+		 (vector unsigned char *)&rx_pkts[pos]->rx_descriptor_fields1
+		);
+		desc_to_ptype_v(descs, &rx_pkts[pos]);
+		desc_to_olflags_v(descs, &rx_pkts[pos]);
+
+		/* C.4 calc available number of desc */
+		var = __builtin_popcountll((vec_ld(0,
+			(vector unsigned long *)&staterr)[0]));
+		nb_pkts_recd += var;
+		if (likely(var != RTE_I40E_DESCS_PER_LOOP))
+			break;
+	}
+
+	/* Update our internal tail pointer */
+	rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
+	rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
+	rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);
+
+	return nb_pkts_recd;
+}
+
+ /* Notice:
+  * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
+  * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
+  *   numbers of DD bits
+  */
+uint16_t
+i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+		   uint16_t nb_pkts)
+{
+	return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
+}
+
+ /* vPMD receive routine that reassembles scattered packets
+  * Notice:
+  * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
+  * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
+  *   numbers of DD bits
+  */
+uint16_t
+i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+			     uint16_t nb_pkts)
+{
+	struct i40e_rx_queue *rxq = rx_queue;
+	uint8_t split_flags[RTE_I40E_VPMD_RX_BURST] = {0};
+
+	/* get some new buffers */
+	uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
+			split_flags);
+	if (nb_bufs == 0)
+		return 0;
+
+	/* happy day case, full burst + no packets to be joined */
+	const uint64_t *split_fl64 = (uint64_t *)split_flags;
+
+	if (rxq->pkt_first_seg == NULL &&
+	    split_fl64[0] == 0 && split_fl64[1] == 0 &&
+	    split_fl64[2] == 0 && split_fl64[3] == 0)
+		return nb_bufs;
+
+	/* reassemble any packets that need reassembly*/
+	unsigned int i = 0;
+
+	if (!rxq->pkt_first_seg) {
+		/* find the first split flag, and only reassemble then*/
+		while (i < nb_bufs && !split_flags[i])
+			i++;
+		if (i == nb_bufs)
+			return nb_bufs;
+	}
+	return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
+		&split_flags[i]);
+}
+
+static inline void
+vtx1(volatile struct i40e_tx_desc *txdp,
+	struct rte_mbuf *pkt, uint64_t flags)
+{
+	uint64_t high_qw = (I40E_TX_DESC_DTYPE_DATA |
+		((uint64_t)flags  << I40E_TXD_QW1_CMD_SHIFT) |
+		((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT));
+
+	vector unsigned long descriptor = (vector unsigned long){
+		pkt->buf_physaddr + pkt->data_off, high_qw};
+	*(vector unsigned long *)txdp = descriptor;
+}
+
+static inline void
+vtx(volatile struct i40e_tx_desc *txdp,
+	struct rte_mbuf **pkt, uint16_t nb_pkts,  uint64_t flags)
+{
+	int i;
+
+	for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
+		vtx1(txdp, *pkt, flags);
+}
+
+uint16_t
+i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+		   uint16_t nb_pkts)
+{
+	struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
+	volatile struct i40e_tx_desc *txdp;
+	struct i40e_tx_entry *txep;
+	uint16_t n, nb_commit, tx_id;
+	uint64_t flags = I40E_TD_CMD;
+	uint64_t rs = I40E_TX_DESC_CMD_RS | I40E_TD_CMD;
+	int i;
+
+	/* cross rx_thresh boundary is not allowed */
+	nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+
+	if (txq->nb_tx_free < txq->tx_free_thresh)
+		i40e_tx_free_bufs(txq);
+
+	nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
+	nb_commit = nb_pkts;
+	if (unlikely(nb_pkts == 0))
+		return 0;
+
+	tx_id = txq->tx_tail;
+	txdp = &txq->tx_ring[tx_id];
+	txep = &txq->sw_ring[tx_id];
+
+	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
+
+	n = (uint16_t)(txq->nb_tx_desc - tx_id);
+	if (nb_commit >= n) {
+		tx_backlog_entry(txep, tx_pkts, n);
+
+		for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
+			vtx1(txdp, *tx_pkts, flags);
+
+		vtx1(txdp, *tx_pkts++, rs);
+
+		nb_commit = (uint16_t)(nb_commit - n);
+
+		tx_id = 0;
+		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+
+		/* avoid reaching the end of the ring */
+		txdp = &txq->tx_ring[tx_id];
+		txep = &txq->sw_ring[tx_id];
+	}
+
+	tx_backlog_entry(txep, tx_pkts, nb_commit);
+
+	vtx(txdp, tx_pkts, nb_commit, flags);
+
+	tx_id = (uint16_t)(tx_id + nb_commit);
+	if (tx_id > txq->tx_next_rs) {
+		txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+			rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
+						I40E_TXD_QW1_CMD_SHIFT);
+		txq->tx_next_rs =
+			(uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
+	}
+
+	txq->tx_tail = tx_id;
+
+	I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+
+	return nb_pkts;
+}
+
+void __attribute__((cold))
+i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
+{
+	_i40e_rx_queue_release_mbufs_vec(rxq);
+}
+
+int __attribute__((cold))
+i40e_rxq_vec_setup(struct i40e_rx_queue *rxq)
+{
+	return i40e_rxq_vec_setup_default(rxq);
+}
+
+int __attribute__((cold))
+i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused * txq)
+{
+	return 0;
+}
+
+int __attribute__((cold))
+i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
+{
+	return i40e_rx_vec_dev_conf_condition_check_default(dev);
+}
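
Like the existing SSE and NEON paths, the new Altivec receive routine above handles four descriptors per iteration and stops the burst as soon as a batch has fewer than four DD (descriptor done) bits set, using a population count over the packed status word. Reduced to scalar C, with the packed layout simplified to one bit per descriptor for illustration:

#include <stdio.h>
#include <stdint.h>

#define DESCS_PER_LOOP 4

/* Count done descriptors in a 4-wide batch and report whether the
 * burst should continue (all four done) or stop early. */
static unsigned int
count_done(uint64_t dd_bits, int *keep_going)
{
	unsigned int n = (unsigned int)__builtin_popcountll(dd_bits);

	*keep_going = (n == DESCS_PER_LOOP);
	return n;
}

int main(void)
{
	int more;

	printf("%u\n", count_done(0xF, &more)); /* 4, more = 1 */
	printf("%u\n", count_done(0x3, &more)); /* 2, more = 0: stop burst */
	return 0;
}
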
diff -Nru dpdk-16.11.4/drivers/net/i40e/Makefile dpdk-16.11.6/drivers/net/i40e/Makefile
--- dpdk-16.11.4/drivers/net/i40e/Makefile	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/i40e/Makefile	2018-04-19 15:01:06.000000000 +0100
@@ -99,6 +99,8 @@
 SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_rxtx.c
 ifeq ($(CONFIG_RTE_ARCH_ARM64),y)
 SRCS-$(CONFIG_RTE_LIBRTE_I40E_INC_VECTOR) += i40e_rxtx_vec_neon.c
+else ifeq ($(CONFIG_RTE_ARCH_PPC_64),y)
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_INC_VECTOR) += i40e_rxtx_vec_altivec.c
 else
 SRCS-$(CONFIG_RTE_LIBRTE_I40E_INC_VECTOR) += i40e_rxtx_vec_sse.c
 endif
diff -Nru dpdk-16.11.4/drivers/net/ixgbe/base/ixgbe_82599.c dpdk-16.11.6/drivers/net/ixgbe/base/ixgbe_82599.c
--- dpdk-16.11.4/drivers/net/ixgbe/base/ixgbe_82599.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/ixgbe/base/ixgbe_82599.c	2018-04-19 15:01:06.000000000 +0100
@@ -87,6 +87,9 @@
 		mac->ops.setup_mac_link = ixgbe_setup_mac_link_82599;
 		mac->ops.set_rate_select_speed =
 					       ixgbe_set_hard_rate_select_speed;
+		if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber_fixed)
+			mac->ops.set_rate_select_speed =
+					       ixgbe_set_soft_rate_select_speed;
 	} else {
 		if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
 		     (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
@@ -564,6 +567,10 @@
 	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
 		media_type = ixgbe_media_type_fiber_qsfp;
 		break;
+	case IXGBE_DEV_ID_82599_BYPASS:
+		media_type = ixgbe_media_type_fiber_fixed;
+		hw->phy.multispeed_fiber = true;
+		break;
 	default:
 		media_type = ixgbe_media_type_unknown;
 		break;
diff -Nru dpdk-16.11.4/drivers/net/ixgbe/base/ixgbe_api.c dpdk-16.11.6/drivers/net/ixgbe/base/ixgbe_api.c
--- dpdk-16.11.4/drivers/net/ixgbe/base/ixgbe_api.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/ixgbe/base/ixgbe_api.c	2018-04-19 15:01:06.000000000 +0100
@@ -178,6 +178,7 @@
 	case IXGBE_DEV_ID_82599EN_SFP:
 	case IXGBE_DEV_ID_82599_CX4:
 	case IXGBE_DEV_ID_82599_LS:
+	case IXGBE_DEV_ID_82599_BYPASS:
 	case IXGBE_DEV_ID_82599_T3_LOM:
 		hw->mac.type = ixgbe_mac_82599EB;
 		break;
@@ -192,6 +193,7 @@
 		break;
 	case IXGBE_DEV_ID_X540T:
 	case IXGBE_DEV_ID_X540T1:
+	case IXGBE_DEV_ID_X540_BYPASS:
 		hw->mac.type = ixgbe_mac_X540;
 		hw->mvals = ixgbe_mvals_X540;
 		break;
diff -Nru dpdk-16.11.4/drivers/net/ixgbe/base/ixgbe_common.c dpdk-16.11.6/drivers/net/ixgbe/base/ixgbe_common.c
--- dpdk-16.11.4/drivers/net/ixgbe/base/ixgbe_common.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/ixgbe/base/ixgbe_common.c	2018-04-19 15:01:06.000000000 +0100
@@ -166,6 +166,7 @@
 	DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
 
 	switch (hw->phy.media_type) {
+	case ixgbe_media_type_fiber_fixed:
 	case ixgbe_media_type_fiber_qsfp:
 	case ixgbe_media_type_fiber:
 		/* flow control autoneg black list */
@@ -196,6 +197,7 @@
 		case IXGBE_DEV_ID_82599_T3_LOM:
 		case IXGBE_DEV_ID_X540T:
 		case IXGBE_DEV_ID_X540T1:
+		case IXGBE_DEV_ID_X540_BYPASS:
 		case IXGBE_DEV_ID_X550T:
 		case IXGBE_DEV_ID_X550T1:
 		case IXGBE_DEV_ID_X550EM_X_10G_T:
@@ -261,6 +263,7 @@
 			goto out;
 
 		/* only backplane uses autoc so fall though */
+	case ixgbe_media_type_fiber_fixed:
 	case ixgbe_media_type_fiber_qsfp:
 	case ixgbe_media_type_fiber:
 		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
@@ -3068,6 +3071,7 @@
 
 	switch (hw->phy.media_type) {
 	/* Autoneg flow control on fiber adapters */
+	case ixgbe_media_type_fiber_fixed:
 	case ixgbe_media_type_fiber_qsfp:
 	case ixgbe_media_type_fiber:
 		if (speed == IXGBE_LINK_SPEED_1GB_FULL)
@@ -4552,7 +4556,7 @@
 	/* first pull in the header so we know the buffer length */
 	for (bi = 0; bi < dword_len; bi++) {
 		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
-		IXGBE_LE32_TO_CPUS(&buffer[bi]);
+		IXGBE_LE32_TO_CPUS((uintptr_t)&buffer[bi]);
 	}
 
 	/* If there is any thing in data position pull it in */
@@ -4572,7 +4576,7 @@
 	/* Pull in the rest of the buffer (bi is where we left off) */
 	for (; bi <= dword_len; bi++) {
 		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
-		IXGBE_LE32_TO_CPUS(&buffer[bi]);
+		IXGBE_LE32_TO_CPUS((uintptr_t)&buffer[bi]);
 	}
 
 rel_out:
@@ -5065,6 +5069,7 @@
 
 		/* Set the module link speed */
 		switch (hw->phy.media_type) {
+		case ixgbe_media_type_fiber_fixed:
 		case ixgbe_media_type_fiber:
 			ixgbe_set_rate_select_speed(hw,
 						    IXGBE_LINK_SPEED_10GB_FULL);
@@ -5115,6 +5120,7 @@
 
 		/* Set the module link speed */
 		switch (hw->phy.media_type) {
+		case ixgbe_media_type_fiber_fixed:
 		case ixgbe_media_type_fiber:
 			ixgbe_set_rate_select_speed(hw,
 						    IXGBE_LINK_SPEED_1GB_FULL);
diff -Nru dpdk-16.11.4/drivers/net/ixgbe/base/ixgbe_mbx.c dpdk-16.11.6/drivers/net/ixgbe/base/ixgbe_mbx.c
--- dpdk-16.11.4/drivers/net/ixgbe/base/ixgbe_mbx.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/ixgbe/base/ixgbe_mbx.c	2018-04-19 15:01:06.000000000 +0100
@@ -444,17 +444,6 @@
 	for (i = 0; i < size; i++)
 		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]);
 
-	/*
-	 * Complete the remaining mailbox data registers with zero to reset
-	 * the data sent in a previous exchange (in either side) with the PF,
-	 * including exchanges performed by another Guest OS to which that VF
-	 * was previously assigned.
-	 */
-	while (i < hw->mbx.size) {
-		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, 0);
-		i++;
-	}
-
 	/* update stats */
 	hw->mbx.stats.msgs_tx++;
 
@@ -693,17 +682,6 @@
 	for (i = 0; i < size; i++)
 		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, msg[i]);
 
-	/*
-	 * Complete the remaining mailbox data registers with zero to reset
-	 * the data sent in a previous exchange (in either side) with the VF,
-	 * including exchanges performed by another Guest OS to which that VF
-	 * was previously assigned.
-	 */
-	while (i < hw->mbx.size) {
-		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, 0);
-		i++;
-	}
-
 	/* Interrupt VF to tell it a message has been sent and release buffer*/
 	IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_STS);
 
diff -Nru dpdk-16.11.4/drivers/net/ixgbe/base/ixgbe_type.h dpdk-16.11.6/drivers/net/ixgbe/base/ixgbe_type.h
--- dpdk-16.11.4/drivers/net/ixgbe/base/ixgbe_type.h	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/ixgbe/base/ixgbe_type.h	2018-04-19 15:01:06.000000000 +0100
@@ -123,9 +123,11 @@
 #define IXGBE_DEV_ID_82599_VF			0x10ED
 #define IXGBE_DEV_ID_82599_VF_HV		0x152E
 #define IXGBE_DEV_ID_82599_LS			0x154F
+#define IXGBE_DEV_ID_82599_BYPASS		0x155D
 #define IXGBE_DEV_ID_X540T			0x1528
 #define IXGBE_DEV_ID_X540_VF			0x1515
 #define IXGBE_DEV_ID_X540_VF_HV			0x1530
+#define IXGBE_DEV_ID_X540_BYPASS		0x155C
 #define IXGBE_DEV_ID_X540T1			0x1560
 #define IXGBE_DEV_ID_X550T			0x1563
 #define IXGBE_DEV_ID_X550T1			0x15D1
@@ -270,7 +272,6 @@
 #define IXGBE_I2C_BB_EN_X550		0x00000100
 #define IXGBE_I2C_BB_EN_X550EM_x	IXGBE_I2C_BB_EN_X550
 #define IXGBE_I2C_BB_EN_X550EM_a	IXGBE_I2C_BB_EN_X550
-
 #define IXGBE_I2C_BB_EN_BY_MAC(_hw)	IXGBE_BY_MAC((_hw), I2C_BB_EN)
 
 #define IXGBE_I2C_CLK_OE_N_EN		0
@@ -3626,6 +3627,7 @@
 enum ixgbe_media_type {
 	ixgbe_media_type_unknown = 0,
 	ixgbe_media_type_fiber,
+	ixgbe_media_type_fiber_fixed,
 	ixgbe_media_type_fiber_qsfp,
 	ixgbe_media_type_fiber_lco,
 	ixgbe_media_type_copper,
diff -Nru dpdk-16.11.4/drivers/net/ixgbe/ixgbe_ethdev.c dpdk-16.11.6/drivers/net/ixgbe/ixgbe_ethdev.c
--- dpdk-16.11.4/drivers/net/ixgbe/ixgbe_ethdev.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/ixgbe/ixgbe_ethdev.c	2018-04-19 15:01:06.000000000 +0100
@@ -93,6 +93,9 @@
 /* Timer value included in XOFF frames. */
 #define IXGBE_FC_PAUSE 0x680
 
+/*Default value of Max Rx Queue*/
+#define IXGBE_MAX_RX_QUEUE_NUM 128
+
 #define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
 #define IXGBE_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
 #define IXGBE_VMDQ_NUM_UC_MAC         4096 /* Maximum nb. of UC MAC addr. */
@@ -249,6 +252,8 @@
 static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev);
 static int  ixgbevf_dev_configure(struct rte_eth_dev *dev);
 static int  ixgbevf_dev_start(struct rte_eth_dev *dev);
+static int ixgbevf_dev_link_update(struct rte_eth_dev *dev,
+				   int wait_to_complete);
 static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
 static void ixgbevf_dev_close(struct rte_eth_dev *dev);
 static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
@@ -619,7 +624,7 @@
 	.dev_configure        = ixgbevf_dev_configure,
 	.dev_start            = ixgbevf_dev_start,
 	.dev_stop             = ixgbevf_dev_stop,
-	.link_update          = ixgbe_dev_link_update,
+	.link_update          = ixgbevf_dev_link_update,
 	.stats_get            = ixgbevf_dev_stats_get,
 	.xstats_get           = ixgbevf_dev_xstats_get,
 	.stats_reset          = ixgbevf_dev_stats_reset,
@@ -1959,9 +1964,10 @@
 		return -EINVAL;
 	}
 
-	RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = nb_rx_q;
-	RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = dev->pci_dev->max_vfs * nb_rx_q;
-
+	RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
+		IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
+	RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
+		dev->pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
 	return 0;
 }
 
@@ -2001,8 +2007,6 @@
 		case ETH_MQ_RX_NONE:
 			/* if nothing mq mode configure, use default scheme */
 			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
-			if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
-				RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
 			break;
 		default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
 			/* SRIOV only works in VMDq enable mode */
@@ -3217,15 +3221,123 @@
 	dev_info->tx_desc_lim = tx_desc_lim;
 }
 
+static int
+ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+		   int *link_up, int wait_to_complete)
+{
+	/**
+	 * for a quick link status check, wait_to_complete == 0,
+	 * skip PF link status checking
+	 */
+	bool no_pflink_check = wait_to_complete == 0;
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	struct ixgbe_mac_info *mac = &hw->mac;
+	uint32_t links_reg, in_msg;
+	int ret_val = 0;
+
+	/* If we were hit with a reset drop the link */
+	if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout)
+		mac->get_link_status = true;
+
+	if (!mac->get_link_status)
+		goto out;
+
+	/* if link status is down no point in checking to see if pf is up */
+	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+	if (!(links_reg & IXGBE_LINKS_UP))
+		goto out;
+
+	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
+	 * before the link status is correct
+	 */
+	if (mac->type == ixgbe_mac_82599_vf) {
+		int i;
+
+		for (i = 0; i < 5; i++) {
+			rte_delay_us(100);
+			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+
+			if (!(links_reg & IXGBE_LINKS_UP))
+				goto out;
+		}
+	}
+
+	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
+	case IXGBE_LINKS_SPEED_10G_82599:
+		*speed = IXGBE_LINK_SPEED_10GB_FULL;
+		if (hw->mac.type >= ixgbe_mac_X550) {
+			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
+				*speed = IXGBE_LINK_SPEED_2_5GB_FULL;
+		}
+		break;
+	case IXGBE_LINKS_SPEED_1G_82599:
+		*speed = IXGBE_LINK_SPEED_1GB_FULL;
+		break;
+	case IXGBE_LINKS_SPEED_100_82599:
+		*speed = IXGBE_LINK_SPEED_100_FULL;
+		if (hw->mac.type == ixgbe_mac_X550) {
+			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
+				*speed = IXGBE_LINK_SPEED_5GB_FULL;
+		}
+		break;
+	case IXGBE_LINKS_SPEED_10_X550EM_A:
+		*speed = IXGBE_LINK_SPEED_UNKNOWN;
+		/* Since Reserved in older MAC's */
+		if (hw->mac.type >= ixgbe_mac_X550)
+			*speed = IXGBE_LINK_SPEED_10_FULL;
+		break;
+	default:
+		*speed = IXGBE_LINK_SPEED_UNKNOWN;
+	}
+
+	if (no_pflink_check) {
+		if (*speed == IXGBE_LINK_SPEED_UNKNOWN)
+			mac->get_link_status = true;
+		else
+			mac->get_link_status = false;
+
+		goto out;
+	}
+	/* if the read failed it could just be a mailbox collision, best wait
+	 * until we are called again and don't report an error
+	 */
+	if (mbx->ops.read(hw, &in_msg, 1, 0))
+		goto out;
+
+	if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
+		/* msg is not CTS and is NACK we must have lost CTS status */
+		if (in_msg & IXGBE_VT_MSGTYPE_NACK)
+			ret_val = -1;
+		goto out;
+	}
+
+	/* the pf is talking, if we timed out in the past we reinit */
+	if (!mbx->timeout) {
+		ret_val = -1;
+		goto out;
+	}
+
+	/* if we passed all the tests above then the link is up and we no
+	 * longer need to check for link
+	 */
+	mac->get_link_status = false;
+
+out:
+	*link_up = !mac->get_link_status;
+	return ret_val;
+}
+
 /* return 0 means link status changed, -1 means not changed */
 static int
-ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
+			    int wait_to_complete, int vf)
 {
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct rte_eth_link link, old;
 	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
 	int link_up;
 	int diag;
+	int wait = 1;
 
 	link.link_status = ETH_LINK_DOWN;
 	link.link_speed = 0;
@@ -3238,9 +3350,12 @@
 
 	/* check if it needs to wait to complete, if lsc interrupt is enabled */
 	if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
-		diag = ixgbe_check_link(hw, &link_speed, &link_up, 0);
+		wait = 0;
+
+	if (vf)
+		diag = ixgbevf_check_link(hw, &link_speed, &link_up, wait);
 	else
-		diag = ixgbe_check_link(hw, &link_speed, &link_up, 1);
+		diag = ixgbe_check_link(hw, &link_speed, &link_up, wait);
 
 	if (diag != 0) {
 		link.link_speed = ETH_SPEED_NUM_100M;
@@ -3287,6 +3402,18 @@
 	return 0;
 }
 
+static int
+ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+	return ixgbe_dev_link_update_share(dev, wait_to_complete, 0);
+}
+
+static int
+ixgbevf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+	return ixgbe_dev_link_update_share(dev, wait_to_complete, 1);
+}
+
 static void
 ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
 {
@@ -4206,7 +4333,11 @@
 
 	PMD_INIT_FUNC_TRACE();
 
-	hw->mac.ops.reset_hw(hw);
+	err = hw->mac.ops.reset_hw(hw);
+	if (err) {
+		PMD_INIT_LOG(ERR, "Unable to reset vf hardware (%d)", err);
+		return err;
+	}
 	hw->mac.get_link_status = true;
 
 	/* negotiate mailbox API version to use with the PF. */
@@ -4233,7 +4364,8 @@
 	ixgbevf_dev_rxtx_start(dev);
 
 	/* check and configure queue intr-vector mapping */
-	if (dev->data->dev_conf.intr_conf.rxq != 0) {
+	if (rte_intr_cap_multiple(intr_handle) &&
+	    dev->data->dev_conf.intr_conf.rxq != 0) {
 		intr_vector = dev->data->nb_rx_queues;
 		if (rte_intr_efd_enable(intr_handle, intr_vector))
 			return -1;
@@ -7569,12 +7701,17 @@
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	u32 in_msg = 0;
 
-	if (ixgbe_read_mbx(hw, &in_msg, 1, 0))
-		return;
+	/* peek the message first */
+	in_msg = IXGBE_READ_REG(hw, IXGBE_VFMBMEM);
 
 	/* PF reset VF event */
-	if (in_msg == IXGBE_PF_CONTROL_MSG)
-		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, NULL);
+	if (in_msg == IXGBE_PF_CONTROL_MSG) {
+		/* dummy mbx read to ack pf */
+		if (ixgbe_read_mbx(hw, &in_msg, 1, 0))
+			return;
+		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
+					      NULL);
+	}
 }
 
 static int
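
Among the ixgbe_ethdev.c changes above, the SR-IOV RX setup now derives the per-pool queue count from the device limit of 128 RX queues divided by the number of active pools, and starts the PF's default pool right after the VF queues. The arithmetic by itself (pool and VF counts below are example values):

#include <stdio.h>
#include <stdint.h>

#define MAX_RX_QUEUE_NUM 128

int main(void)
{
	uint16_t active_pools = 32;  /* e.g. pools reserved for SR-IOV */
	uint16_t max_vfs = 16;

	uint16_t nb_q_per_pool = MAX_RX_QUEUE_NUM / active_pools;
	uint16_t def_pool_q_idx = max_vfs * nb_q_per_pool;

	printf("queues per pool: %u\n", (unsigned)nb_q_per_pool);    /* 4  */
	printf("PF default pool starts at queue %u\n",
	       (unsigned)def_pool_q_idx);                            /* 64 */
	return 0;
}
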
diff -Nru dpdk-16.11.4/drivers/net/mlx5/mlx5_ethdev.c dpdk-16.11.6/drivers/net/mlx5/mlx5_ethdev.c
--- dpdk-16.11.4/drivers/net/mlx5/mlx5_ethdev.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/mlx5/mlx5_ethdev.c	2018-04-19 15:01:06.000000000 +0100
@@ -658,6 +658,7 @@
 			       (*priv->rss_conf)[0]->rss_key_len :
 			       0);
 	info->speed_capa = priv->link_speed_capa;
+	info->flow_type_rss_offloads = ~MLX5_RSS_HF_MASK;
 	priv_unlock(priv);
 }
 
@@ -985,9 +986,7 @@
 		/* Provide new values to rxq_setup(). */
 		dev->data->dev_conf.rxmode.jumbo_frame = sp;
 		dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame_len;
-		if (rehash)
-			ret = rxq_rehash(dev, rxq_ctrl);
-		else
+		if (!rehash)
 			ret = rxq_ctrl_setup(dev, rxq_ctrl, 1 << rxq->elts_n,
 					     rxq_ctrl->socket, NULL, rxq->mp);
 		if (!ret)
@@ -1239,8 +1238,12 @@
 	struct priv *priv = dev->data->dev_private;
 	int ret;
 
-	priv_lock(priv);
-	assert(priv->pending_alarm == 1);
+	while (!priv_trylock(priv)) {
+		/* Alarm is being canceled. */
+		if (priv->pending_alarm == 0)
+			return;
+		rte_pause();
+	}
 	priv->pending_alarm = 0;
 	ret = priv_dev_link_status_handler(priv, dev);
 	priv_unlock(priv);
@@ -1287,9 +1290,10 @@
 	rte_intr_callback_unregister(&priv->intr_handle,
 				     mlx5_dev_interrupt_handler,
 				     dev);
-	if (priv->pending_alarm)
+	if (priv->pending_alarm) {
+		priv->pending_alarm = 0;
 		rte_eal_alarm_cancel(mlx5_dev_link_status_handler, dev);
-	priv->pending_alarm = 0;
+	}
 	priv->intr_handle.fd = 0;
 	priv->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
 }
diff -Nru dpdk-16.11.4/drivers/net/mlx5/mlx5.h dpdk-16.11.6/drivers/net/mlx5/mlx5.h
--- dpdk-16.11.4/drivers/net/mlx5/mlx5.h	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/mlx5/mlx5.h	2018-04-19 15:01:06.000000000 +0100
@@ -161,6 +161,22 @@
 }
 
 /**
+ * Try to lock private structure to protect it from concurrent access in the
+ * control path.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ *
+ * @return
+ *   1 if the lock is successfully taken; 0 otherwise.
+ */
+static inline int
+priv_trylock(struct priv *priv)
+{
+	return rte_spinlock_trylock(&priv->lock);
+}
+
+/**
  * Unlock private structure.
  *
  * @param priv
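
The mlx5 hunks above replace an unconditional lock in the link-status alarm callback with a trylock loop: if the lock is busy and pending_alarm has meanwhile been cleared by the uninstall path, the callback simply returns, so it cannot deadlock against a thread that holds the lock while cancelling the alarm. A schematic of that handshake using C11 atomics (the flag and lock below stand in for the PMD's spinlock and field; they are not its API):

#include <stdio.h>
#include <stdbool.h>
#include <stdatomic.h>

static atomic_flag lock = ATOMIC_FLAG_INIT;
static atomic_bool pending_alarm = true;

static bool trylock(void) { return !atomic_flag_test_and_set(&lock); }
static void unlock(void)  { atomic_flag_clear(&lock); }

/* Alarm callback: take the lock, but give up if the alarm was cancelled
 * by the thread currently holding the lock. */
static void alarm_handler(void)
{
	while (!trylock()) {
		if (!atomic_load(&pending_alarm))
			return;            /* cancelled under our feet */
	}
	atomic_store(&pending_alarm, false);
	printf("handling link status change\n");
	unlock();
}

/* Uninstall path: clear the flag before cancelling, under the lock. */
static void uninstall(void)
{
	while (!trylock())
		;
	if (atomic_load(&pending_alarm)) {
		atomic_store(&pending_alarm, false);
		/* the alarm cancel call would run here */
	}
	unlock();
}

int main(void)
{
	alarm_handler();
	uninstall();
	return 0;
}
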
diff -Nru dpdk-16.11.4/drivers/net/nfp/nfp_net.c dpdk-16.11.6/drivers/net/nfp/nfp_net.c
--- dpdk-16.11.4/drivers/net/nfp/nfp_net.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/nfp/nfp_net.c	2018-04-19 15:01:06.000000000 +0100
@@ -517,12 +517,10 @@
 		new_ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
 
 	if (rxmode->jumbo_frame)
-		/* this is handled in rte_eth_dev_configure */
+		hw->mtu = rxmode->max_rx_pkt_len;
 
-	if (rxmode->hw_strip_crc) {
-		PMD_INIT_LOG(INFO, "strip CRC not supported\n");
-		return -EINVAL;
-	}
+	if (!rxmode->hw_strip_crc)
+		PMD_INIT_LOG(INFO, "HW does strip CRC and it is not configurable\n");
 
 	if (rxmode->enable_scatter) {
 		PMD_INIT_LOG(INFO, "Scatter not supported\n");
@@ -1012,7 +1010,7 @@
 	dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
 	dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
 	dev_info->min_rx_bufsize = ETHER_MIN_MTU;
-	dev_info->max_rx_pktlen = hw->mtu;
+	dev_info->max_rx_pktlen = hw->max_mtu;
 	/* Next should change when PF support is implemented */
 	dev_info->max_mac_addrs = 1;
 
@@ -1240,6 +1238,13 @@
 	if ((mtu < ETHER_MIN_MTU) || ((uint32_t)mtu > hw->max_mtu))
 		return -EINVAL;
 
+	/* mtu setting is forbidden if port is started */
+	if (dev->data->dev_started) {
+		PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
+			    dev->data->port_id);
+		return -EBUSY;
+	}
+
 	/* switch to jumbo mode if needed */
 	if ((uint32_t)mtu > ETHER_MAX_LEN)
 		dev->data->dev_conf.rxmode.jumbo_frame = 1;
@@ -2390,7 +2395,7 @@
 	hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION);
 	hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
 	hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU);
-	hw->mtu = hw->max_mtu;
+	hw->mtu = ETHER_MTU;
 
 	if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 2)
 		hw->rx_offset = NFP_NET_RX_OFFSET;
diff -Nru dpdk-16.11.4/drivers/net/null/rte_eth_null.c dpdk-16.11.6/drivers/net/null/rte_eth_null.c
--- dpdk-16.11.4/drivers/net/null/rte_eth_null.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/null/rte_eth_null.c	2018-04-19 15:01:06.000000000 +0100
@@ -93,7 +93,7 @@
 	.link_speed = ETH_SPEED_NUM_10G,
 	.link_duplex = ETH_LINK_FULL_DUPLEX,
 	.link_status = ETH_LINK_DOWN,
-	.link_autoneg = ETH_LINK_SPEED_AUTONEG,
+	.link_autoneg = ETH_LINK_AUTONEG,
 };
 
 static uint16_t
diff -Nru dpdk-16.11.4/drivers/net/pcap/rte_eth_pcap.c dpdk-16.11.6/drivers/net/pcap/rte_eth_pcap.c
--- dpdk-16.11.4/drivers/net/pcap/rte_eth_pcap.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/pcap/rte_eth_pcap.c	2018-04-19 15:01:06.000000000 +0100
@@ -124,7 +124,7 @@
 		.link_speed = ETH_SPEED_NUM_10G,
 		.link_duplex = ETH_LINK_FULL_DUPLEX,
 		.link_status = ETH_LINK_DOWN,
-		.link_autoneg = ETH_LINK_SPEED_FIXED,
+		.link_autoneg = ETH_LINK_AUTONEG,
 };
 
 static int
@@ -801,7 +801,7 @@
 	struct rte_eth_dev_data *data = NULL;
 	unsigned int numa_node = rte_socket_id();
 
-	RTE_LOG(INFO, PMD, "Creating pcap-backed ethdev on numa socket %u\n",
+	RTE_LOG(INFO, PMD, "Creating pcap-backed ethdev on numa socket %d\n",
 		numa_node);
 
 	/* now do all data allocation - for eth_dev structure
@@ -1042,7 +1042,7 @@
 {
 	struct rte_eth_dev *eth_dev = NULL;
 
-	RTE_LOG(INFO, PMD, "Closing pcap ethdev on numa socket %u\n",
+	RTE_LOG(INFO, PMD, "Closing pcap ethdev on numa socket %d\n",
 			rte_socket_id());
 
 	if (name == NULL)
diff -Nru dpdk-16.11.4/drivers/net/qede/base/ecore_dcbx.c dpdk-16.11.6/drivers/net/qede/base/ecore_dcbx.c
--- dpdk-16.11.4/drivers/net/qede/base/ecore_dcbx.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/qede/base/ecore_dcbx.c	2018-04-19 15:01:06.000000000 +0100
@@ -239,10 +239,9 @@
 		status = true;
 	} else {
 		*type = DCBX_MAX_PROTOCOL_TYPE;
-		DP_ERR(p_hwfn,
-		       "No action required, App TLV id = 0x%x"
-		       " app_prio_bitmap = 0x%x\n",
-		       id, app_prio_bitmap);
+		DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+			    "No action required, App TLV entry = 0x%x\n",
+			   app_prio_bitmap);
 	}
 
 	return status;
diff -Nru dpdk-16.11.4/drivers/net/qede/base/ecore_vf.c dpdk-16.11.6/drivers/net/qede/base/ecore_vf.c
--- dpdk-16.11.4/drivers/net/qede/base/ecore_vf.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/qede/base/ecore_vf.c	2018-04-19 15:01:06.000000000 +0100
@@ -1027,6 +1027,12 @@
 		if (sge_tpa_params->tpa_gro_consistent_flg)
 			p_sge_tpa_tlv->sge_tpa_flags |=
 			    VFPF_TPA_GRO_CONSIST_FLAG;
+		if (sge_tpa_params->tpa_ipv4_tunn_en_flg)
+			p_sge_tpa_tlv->sge_tpa_flags |=
+			    VFPF_TPA_TUNN_IPV4_EN_FLAG;
+		if (sge_tpa_params->tpa_ipv6_tunn_en_flg)
+			p_sge_tpa_tlv->sge_tpa_flags |=
+			    VFPF_TPA_TUNN_IPV6_EN_FLAG;
 
 		p_sge_tpa_tlv->tpa_max_aggs_num =
 		    sge_tpa_params->tpa_max_aggs_num;
diff -Nru dpdk-16.11.4/drivers/net/qede/base/ecore_vfpf_if.h dpdk-16.11.6/drivers/net/qede/base/ecore_vfpf_if.h
--- dpdk-16.11.4/drivers/net/qede/base/ecore_vfpf_if.h	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/qede/base/ecore_vfpf_if.h	2018-04-19 15:01:06.000000000 +0100
@@ -379,6 +379,8 @@
 	#define VFPF_TPA_PKT_SPLIT_FLAG      (1 << 2)
 	#define VFPF_TPA_HDR_DATA_SPLIT_FLAG (1 << 3)
 	#define VFPF_TPA_GRO_CONSIST_FLAG    (1 << 4)
+	#define VFPF_TPA_TUNN_IPV4_EN_FLAG   (1 << 5)
+	#define VFPF_TPA_TUNN_IPV6_EN_FLAG   (1 << 6)
 
 	u8			update_sge_tpa_flags;
 	#define VFPF_UPDATE_SGE_DEPRECATED_FLAG	   (1 << 0)
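
The qede hunks above forward the IPv4/IPv6 tunnelled-TPA enable bits from the VF to the PF alongside the existing TPA flags, i.e. the usual translate-booleans-into-a-bitmask step when building the TLV. In isolation (the flag values are copied from the header hunk above):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define VFPF_TPA_TUNN_IPV4_EN_FLAG (1 << 5)
#define VFPF_TPA_TUNN_IPV6_EN_FLAG (1 << 6)

static uint8_t
tpa_tunn_flags(bool ipv4_en, bool ipv6_en)
{
	uint8_t flags = 0;

	if (ipv4_en)
		flags |= VFPF_TPA_TUNN_IPV4_EN_FLAG;
	if (ipv6_en)
		flags |= VFPF_TPA_TUNN_IPV6_EN_FLAG;
	return flags;
}

int main(void)
{
	printf("0x%02x\n", (unsigned)tpa_tunn_flags(true, true)); /* 0x60 */
	return 0;
}
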
diff -Nru dpdk-16.11.4/drivers/net/qede/qede_ethdev.c dpdk-16.11.6/drivers/net/qede/qede_ethdev.c
--- dpdk-16.11.4/drivers/net/qede/qede_ethdev.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/qede/qede_ethdev.c	2018-04-19 15:01:06.000000000 +0100
@@ -282,6 +282,67 @@
 	return 0;
 }
 
+static void qede_reset_queue_stats(struct qede_dev *qdev, bool xstats)
+{
+#ifdef RTE_LIBRTE_QEDE_DEBUG_DRIVER
+	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+#endif
+	unsigned int i = 0, j = 0, qid;
+	unsigned int rxq_stat_cntrs, txq_stat_cntrs;
+	struct qede_tx_queue *txq;
+
+	DP_VERBOSE(edev, ECORE_MSG_DEBUG, "Clearing queue stats\n");
+
+	rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
+			       RTE_ETHDEV_QUEUE_STAT_CNTRS);
+	txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
+			       RTE_ETHDEV_QUEUE_STAT_CNTRS);
+
+	for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
+		if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) {
+			OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
+			    offsetof(struct qede_rx_queue, rcv_pkts), 0,
+			    sizeof(uint64_t));
+			OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
+			    offsetof(struct qede_rx_queue, rx_hw_errors), 0,
+			    sizeof(uint64_t));
+			OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
+			    offsetof(struct qede_rx_queue, rx_alloc_errors), 0,
+			    sizeof(uint64_t));
+
+			if (xstats)
+				for (j = 0;
+				     j < RTE_DIM(qede_rxq_xstats_strings); j++)
+					OSAL_MEMSET((((char *)
+					    (qdev->fp_array[qid].rxq)) +
+					    qede_rxq_xstats_strings[j].offset),
+					    0,
+					    sizeof(uint64_t));
+
+			i++;
+			if (i == rxq_stat_cntrs)
+				break;
+		}
+	}
+
+	i = 0;
+
+	for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
+		if (qdev->fp_array[qid].type & QEDE_FASTPATH_TX) {
+			txq = qdev->fp_array[(qid)].txqs[0];
+
+			OSAL_MEMSET((uint64_t *)(uintptr_t)
+				(((uint64_t)(uintptr_t)(txq)) +
+				 offsetof(struct qede_tx_queue, xmit_pkts)), 0,
+			    sizeof(uint64_t));
+
+			i++;
+			if (i == txq_stat_cntrs)
+				break;
+		}
+	}
+}
+
 static int
 qede_mcast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *mcast,
 		  bool add)
@@ -629,7 +690,7 @@
 
 	start.remove_inner_vlan = 1;
 	start.gro_enable = 0;
-	start.mtu = ETHER_MTU + QEDE_ETH_OVERHEAD;
+	start.mtu = qdev->mtu;
 	start.vport_id = 0;
 	start.drop_ttl0 = false;
 	start.clear_stats = 1;
@@ -674,6 +735,14 @@
 		}
 	}
 
+	/* We need to have min 1 RX queue.There is no min check in
+	 * rte_eth_dev_configure(), so we are checking it here.
+	 */
+	if (eth_dev->data->nb_rx_queues == 0) {
+		DP_ERR(edev, "Minimum one RX queue is required\n");
+		return -EINVAL;
+	}
+
 	/* Sanity checks and throw warnings */
 	if (rxmode->enable_scatter == 1)
 		eth_dev->data->scattered_rx = 1;
@@ -709,6 +778,14 @@
 	if (rc != 0)
 		return rc;
 
+	/* If jumbo enabled adjust MTU */
+	if (eth_dev->data->dev_conf.rxmode.jumbo_frame)
+		eth_dev->data->mtu =
+			eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
+			ETHER_HDR_LEN - ETHER_CRC_LEN;
+
+	qdev->mtu = eth_dev->data->mtu;
+
 	/* Issue VPORT-START with default config values to allow
 	 * other port configurations early on.
 	 */
@@ -756,8 +833,7 @@
 
 	PMD_INIT_FUNC_TRACE(edev);
 
-	dev_info->min_rx_bufsize = (uint32_t)(ETHER_MIN_MTU +
-					      QEDE_ETH_OVERHEAD);
+	dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE;
 	dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
 	dev_info->rx_desc_lim = qede_rx_desc_lim;
 	dev_info->tx_desc_lim = qede_tx_desc_lim;
@@ -1115,6 +1191,7 @@
 	struct ecore_dev *edev = &qdev->edev;
 
 	ecore_reset_vport_stats(edev);
+	qede_reset_queue_stats(qdev, true);
 }
 
 int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
@@ -1150,6 +1227,7 @@
 	struct ecore_dev *edev = &qdev->edev;
 
 	ecore_reset_vport_stats(edev);
+	qede_reset_queue_stats(qdev, false);
 }
 
 static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
@@ -1395,32 +1473,76 @@
 
 int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 {
-	uint32_t frame_size;
-	struct qede_dev *qdev = dev->data->dev_private;
+	struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
+	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
 	struct rte_eth_dev_info dev_info = {0};
+	struct qede_fastpath *fp;
+	uint32_t max_rx_pkt_len;
+	uint32_t frame_size;
+	uint16_t rx_buf_size;
+	uint16_t bufsz;
+	bool restart = false;
+	int i;
 
+	PMD_INIT_FUNC_TRACE(edev);
+	if (IS_VF(edev))
+		return -ENOTSUP;
 	qede_dev_info_get(dev, &dev_info);
-
-	/* VLAN_TAG = 4 */
-	frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + 4;
-
-	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
+	max_rx_pkt_len = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+	frame_size = max_rx_pkt_len + QEDE_ETH_OVERHEAD;
+	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) {
+		DP_ERR(edev, "MTU %u out of range, %u is maximum allowable\n",
+		       mtu, dev_info.max_rx_pktlen - ETHER_HDR_LEN -
+			ETHER_CRC_LEN - QEDE_ETH_OVERHEAD);
 		return -EINVAL;
-
+	}
 	if (!dev->data->scattered_rx &&
-	    frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
+	    frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
+		DP_INFO(edev, "MTU greater than minimum RX buffer size of %u\n",
+			dev->data->min_rx_buf_size);
 		return -EINVAL;
-
-	if (frame_size > ETHER_MAX_LEN)
+	}
+	/* Temporarily replace I/O functions with dummy ones. It cannot
+	 * be set to NULL because rte_eth_rx_burst() doesn't check for NULL.
+	 */
+	dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
+	dev->tx_pkt_burst = qede_rxtx_pkts_dummy;
+	if (dev->data->dev_started) {
+		dev->data->dev_started = 0;
+		qede_dev_stop(dev);
+		restart = true;
+	}
+	rte_delay_ms(1000);
+	qdev->mtu = mtu;
+	/* Fix up RX buf size for all queues of the port */
+	for_each_queue(i) {
+		fp = &qdev->fp_array[i];
+		if ((fp->type & QEDE_FASTPATH_RX) && (fp->rxq != NULL)) {
+			bufsz = (uint16_t)rte_pktmbuf_data_room_size(
+				fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
+			if (dev->data->scattered_rx)
+				rx_buf_size = bufsz + ETHER_HDR_LEN +
+					      ETHER_CRC_LEN + QEDE_ETH_OVERHEAD;
+			else
+				rx_buf_size = frame_size;
+			rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
+			fp->rxq->rx_buf_size = rx_buf_size;
+			DP_INFO(edev, "buf_size adjusted to %u\n", rx_buf_size);
+		}
+	}
+	if (max_rx_pkt_len > ETHER_MAX_LEN)
 		dev->data->dev_conf.rxmode.jumbo_frame = 1;
 	else
 		dev->data->dev_conf.rxmode.jumbo_frame = 0;
-
+	if (!dev->data->dev_started && restart) {
+		qede_dev_start(dev);
+		dev->data->dev_started = 1;
+	}
 	/* update max frame size */
-	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
-	qdev->mtu = mtu;
-	qede_dev_stop(dev);
-	qede_dev_start(dev);
+	dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len;
+	/* Reassign back */
+	dev->rx_pkt_burst = qede_recv_pkts;
+	dev->tx_pkt_burst = qede_xmit_pkts;
 
 	return 0;
 }
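
For reference only (not part of the debdiff): a minimal stand-alone sketch of the
frame-size arithmetic that the reworked qede_set_mtu() above now performs, with the
overhead macros copied from the qede_rxtx.h hunk further down. The MTU value and the
simplified QEDE_FW_RX_ALIGN_END constant are illustrative assumptions.

#include <stdint.h>
#include <stdio.h>

#define ETHER_HDR_LEN			14
#define ETHER_CRC_LEN			4
#define QEDE_VLAN_TAG_SIZE		4
#define QEDE_LLC_SNAP_HDR_LEN		8
#define QEDE_FW_RX_ALIGN_END		64	/* 1 << QEDE_RX_ALIGN_SHIFT */
#define QEDE_ETH_OVERHEAD	(((2 * QEDE_VLAN_TAG_SIZE)) - (ETHER_CRC_LEN) \
				+ (QEDE_LLC_SNAP_HDR_LEN))
#define QEDE_CEIL_TO_CACHE_LINE_SIZE(n) (((n) + (QEDE_FW_RX_ALIGN_END - 1)) & \
					~(QEDE_FW_RX_ALIGN_END - 1))

int main(void)
{
	uint32_t mtu = 1500;
	/* Ethernet header + CRC on top of the MTU ... */
	uint32_t max_rx_pkt_len = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;	  /* 1518 */
	/* ... plus the qede overhead (2 VLAN tags + LLC/SNAP - CRC) ... */
	uint32_t frame_size = max_rx_pkt_len + QEDE_ETH_OVERHEAD;	  /* 1530 */
	/* ... and the RX buffer rounded up to the firmware alignment */
	uint32_t rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(frame_size); /* 1536 */

	printf("max_rx_pkt_len=%u frame_size=%u rx_buf_size=%u\n",
	       max_rx_pkt_len, frame_size, rx_buf_size);
	return 0;
}
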
diff -Nru dpdk-16.11.4/drivers/net/qede/qede_rxtx.c dpdk-16.11.6/drivers/net/qede/qede_rxtx.c
--- dpdk-16.11.4/drivers/net/qede/qede_rxtx.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/qede/qede_rxtx.c	2018-04-19 15:01:06.000000000 +0100
@@ -89,11 +89,11 @@
 {
 	struct qede_dev *qdev = dev->data->dev_private;
 	struct ecore_dev *edev = &qdev->edev;
-	struct rte_eth_dev_data *eth_data = dev->data;
+	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
 	struct qede_rx_queue *rxq;
-	uint16_t pkt_len = (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len;
+	uint16_t max_rx_pkt_len;
+	uint16_t bufsz;
 	size_t size;
-	uint16_t data_size;
 	int rc;
 	int i;
 
@@ -127,34 +127,27 @@
 	rxq->nb_rx_desc = nb_desc;
 	rxq->queue_id = queue_idx;
 	rxq->port_id = dev->data->port_id;
+	max_rx_pkt_len = (uint16_t)rxmode->max_rx_pkt_len;
 
-	/* Sanity check */
-	data_size = (uint16_t)rte_pktmbuf_data_room_size(mp) -
-				RTE_PKTMBUF_HEADROOM;
-
-	if (pkt_len > data_size && !dev->data->scattered_rx) {
-		DP_ERR(edev, "MTU %u should not exceed dataroom %u\n",
-		       pkt_len, data_size);
-		rte_free(rxq);
-		return -EINVAL;
+	/* Fix up RX buffer size */
+	bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
+	if ((rxmode->enable_scatter)			||
+	    (max_rx_pkt_len + QEDE_ETH_OVERHEAD) > bufsz) {
+		if (!dev->data->scattered_rx) {
+			DP_INFO(edev, "Forcing scatter-gather mode\n");
+			dev->data->scattered_rx = 1;
+		}
 	}
-
 	if (dev->data->scattered_rx)
-		rxq->rx_buf_size = data_size;
+		rxq->rx_buf_size = bufsz + ETHER_HDR_LEN +
+				   ETHER_CRC_LEN + QEDE_ETH_OVERHEAD;
 	else
-		rxq->rx_buf_size = pkt_len + QEDE_ETH_OVERHEAD;
+		rxq->rx_buf_size = max_rx_pkt_len + QEDE_ETH_OVERHEAD;
+	/* Align to cache-line size if needed */
+	rxq->rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rxq->rx_buf_size);
 
-	qdev->mtu = pkt_len;
-
-	DP_INFO(edev, "MTU = %u ; RX buffer = %u\n",
-		qdev->mtu, rxq->rx_buf_size);
-
-	if (pkt_len > ETHER_MAX_LEN) {
-		dev->data->dev_conf.rxmode.jumbo_frame = 1;
-		DP_NOTICE(edev, false, "jumbo frame enabled\n");
-	} else {
-		dev->data->dev_conf.rxmode.jumbo_frame = 0;
-	}
+	DP_INFO(edev, "mtu %u mbufsz %u bd_max_bytes %u scatter_mode %d\n",
+		qdev->mtu, bufsz, rxq->rx_buf_size, dev->data->scattered_rx);
 
 	/* Allocate the parallel driver ring for Rx buffers */
 	size = sizeof(*rxq->sw_rx_ring) * rxq->nb_rx_desc;
@@ -222,7 +215,7 @@
 	dev->data->rx_queues[queue_idx] = rxq;
 
 	DP_INFO(edev, "rxq %d num_desc %u rx_buf_size=%u socket %u\n",
-		  queue_idx, nb_desc, qdev->mtu, socket_id);
+		  queue_idx, nb_desc, rxq->rx_buf_size, socket_id);
 
 	return 0;
 err4:
@@ -1541,3 +1534,11 @@
 
 	DP_INFO(edev, "dev_state is QEDE_DEV_STOP\n");
 }
+
+uint16_t
+qede_rxtx_pkts_dummy(__rte_unused void *p_rxq,
+		     __rte_unused struct rte_mbuf **pkts,
+		     __rte_unused uint16_t nb_pkts)
+{
+	return 0;
+}
diff -Nru dpdk-16.11.4/drivers/net/qede/qede_rxtx.h dpdk-16.11.6/drivers/net/qede/qede_rxtx.h
--- dpdk-16.11.4/drivers/net/qede/qede_rxtx.h	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/qede/qede_rxtx.h	2018-04-19 15:01:06.000000000 +0100
@@ -55,14 +55,21 @@
 	((flags) & (PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK \
 		<< PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT))
 
+#define QEDE_MIN_RX_BUFF_SIZE		(1024)
+#define QEDE_VLAN_TAG_SIZE		(4)
+#define QEDE_LLC_SNAP_HDR_LEN		(8)
+
 /* Max supported alignment is 256 (8 shift)
  * minimal alignment shift 6 is optimal for 57xxx HW performance
  */
 #define QEDE_L1_CACHE_SHIFT	6
 #define QEDE_RX_ALIGN_SHIFT	(RTE_MAX(6, RTE_MIN(8, QEDE_L1_CACHE_SHIFT)))
 #define QEDE_FW_RX_ALIGN_END	(1UL << QEDE_RX_ALIGN_SHIFT)
-
-#define QEDE_ETH_OVERHEAD       (ETHER_HDR_LEN + 8 + 8 + QEDE_FW_RX_ALIGN_END)
+#define QEDE_CEIL_TO_CACHE_LINE_SIZE(n) (((n) + (QEDE_FW_RX_ALIGN_END - 1)) & \
+					~(QEDE_FW_RX_ALIGN_END - 1))
+/* Note: QEDE_LLC_SNAP_HDR_LEN is optional */
+#define QEDE_ETH_OVERHEAD	(((2 * QEDE_VLAN_TAG_SIZE)) - (ETHER_CRC_LEN) \
+				+ (QEDE_LLC_SNAP_HDR_LEN))
 
 /* TBD: Excluding IPV6 */
 #define QEDE_RSS_OFFLOAD_ALL    (ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP | \
@@ -180,6 +187,10 @@
 uint16_t qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts,
 			uint16_t nb_pkts);
 
+uint16_t qede_rxtx_pkts_dummy(__rte_unused void *p_rxq,
+			      __rte_unused struct rte_mbuf **pkts,
+			      __rte_unused uint16_t nb_pkts);
+
 /* Fastpath resource alloc/dealloc helpers */
 int qede_alloc_fp_resc(struct qede_dev *qdev);
 
diff -Nru dpdk-16.11.4/drivers/net/ring/rte_eth_ring.c dpdk-16.11.6/drivers/net/ring/rte_eth_ring.c
--- dpdk-16.11.4/drivers/net/ring/rte_eth_ring.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/ring/rte_eth_ring.c	2018-04-19 15:01:06.000000000 +0100
@@ -80,7 +80,7 @@
 		.link_speed = ETH_SPEED_NUM_10G,
 		.link_duplex = ETH_LINK_FULL_DUPLEX,
 		.link_status = ETH_LINK_DOWN,
-		.link_autoneg = ETH_LINK_SPEED_AUTONEG
+		.link_autoneg = ETH_LINK_AUTONEG
 };
 
 static uint16_t
diff -Nru dpdk-16.11.4/drivers/net/szedata2/rte_eth_szedata2.c dpdk-16.11.6/drivers/net/szedata2/rte_eth_szedata2.c
--- dpdk-16.11.4/drivers/net/szedata2/rte_eth_szedata2.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/szedata2/rte_eth_szedata2.c	2018-04-19 15:01:06.000000000 +0100
@@ -1169,7 +1169,7 @@
 	link.link_status = (cgmii_ibuf_is_enabled(ibuf) &&
 			cgmii_ibuf_is_link_up(ibuf)) ? ETH_LINK_UP : ETH_LINK_DOWN;
 
-	link.link_autoneg = ETH_LINK_SPEED_FIXED;
+	link.link_autoneg = ETH_LINK_FIXED;
 
 	rte_atomic64_cmpset((uint64_t *)dev_link, *(uint64_t *)dev_link,
 			*(uint64_t *)link_ptr);
@@ -1494,7 +1494,7 @@
 			dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].len,
 			PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 	close(fd);
-	if (pci_resource_ptr == NULL) {
+	if (pci_resource_ptr == MAP_FAILED) {
 		RTE_LOG(ERR, PMD, "Could not mmap file %s (fd = %d)\n",
 				rsc_filename, fd);
 		return -EINVAL;
diff -Nru dpdk-16.11.4/drivers/net/thunderx/nicvf_ethdev.c dpdk-16.11.6/drivers/net/thunderx/nicvf_ethdev.c
--- dpdk-16.11.4/drivers/net/thunderx/nicvf_ethdev.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/thunderx/nicvf_ethdev.c	2018-04-19 15:01:06.000000000 +0100
@@ -98,7 +98,7 @@
 	else if (nic->duplex == NICVF_FULL_DUPLEX)
 		link->link_duplex = ETH_LINK_FULL_DUPLEX;
 	link->link_speed = nic->speed;
-	link->link_autoneg = ETH_LINK_SPEED_AUTONEG;
+	link->link_autoneg = ETH_LINK_AUTONEG;
 }
 
 static void
diff -Nru dpdk-16.11.4/drivers/net/thunderx/nicvf_rxtx.c dpdk-16.11.6/drivers/net/thunderx/nicvf_rxtx.c
--- dpdk-16.11.4/drivers/net/thunderx/nicvf_rxtx.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/thunderx/nicvf_rxtx.c	2018-04-19 15:01:06.000000000 +0100
@@ -252,7 +252,7 @@
 
 	/* Inform HW to xmit the packets */
 	nicvf_addr_write(sq->sq_door, used_desc);
-	return nb_pkts;
+	return i;
 }
 
 static const uint32_t ptype_table[16][16] __rte_cache_aligned = {
diff -Nru dpdk-16.11.4/drivers/net/vhost/rte_eth_vhost.c dpdk-16.11.6/drivers/net/vhost/rte_eth_vhost.c
--- dpdk-16.11.4/drivers/net/vhost/rte_eth_vhost.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/vhost/rte_eth_vhost.c	2018-04-19 15:01:06.000000000 +0100
@@ -558,7 +558,7 @@
 		rte_atomic32_set(&vq->allow_queuing, 1);
 	}
 
-	RTE_LOG(INFO, PMD, "New connection established\n");
+	RTE_LOG(INFO, PMD, "Vhost device %d created\n", vid);
 
 	_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
 
@@ -625,7 +625,7 @@
 	state->max_vring = 0;
 	rte_spinlock_unlock(&state->lock);
 
-	RTE_LOG(INFO, PMD, "Connection closed\n");
+	RTE_LOG(INFO, PMD, "Vhost device %d destroyed\n", vid);
 
 	_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
 }
diff -Nru dpdk-16.11.4/drivers/net/virtio/virtio_ethdev.c dpdk-16.11.6/drivers/net/virtio/virtio_ethdev.c
--- dpdk-16.11.4/drivers/net/virtio/virtio_ethdev.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/virtio/virtio_ethdev.c	2018-04-19 15:01:06.000000000 +0100
@@ -288,17 +288,6 @@
 	/* do nothing */
 }
 
-static int
-virtio_get_queue_type(struct virtio_hw *hw, uint16_t vtpci_queue_idx)
-{
-	if (vtpci_queue_idx == hw->max_queue_pairs * 2)
-		return VTNET_CQ;
-	else if (vtpci_queue_idx % 2 == 0)
-		return VTNET_RQ;
-	else
-		return VTNET_TQ;
-}
-
 static uint16_t
 virtio_get_nr_vq(struct virtio_hw *hw)
 {
@@ -847,7 +836,7 @@
 		/* Note: limit checked in rte_eth_xstats_names() */
 
 		for (i = 0; i < dev->data->nb_rx_queues; i++) {
-			struct virtqueue *rxvq = dev->data->rx_queues[i];
+			struct virtnet_rx *rxvq = dev->data->rx_queues[i];
 			if (rxvq == NULL)
 				continue;
 			for (t = 0; t < VIRTIO_NB_RXQ_XSTATS; t++) {
@@ -860,7 +849,7 @@
 		}
 
 		for (i = 0; i < dev->data->nb_tx_queues; i++) {
-			struct virtqueue *txvq = dev->data->tx_queues[i];
+			struct virtnet_tx *txvq = dev->data->tx_queues[i];
 			if (txvq == NULL)
 				continue;
 			for (t = 0; t < VIRTIO_NB_TXQ_XSTATS; t++) {
@@ -1205,6 +1194,11 @@
 	/* Reset the device although not necessary at startup */
 	vtpci_reset(hw);
 
+	if (hw->vqs) {
+		virtio_dev_free_mbufs(eth_dev);
+		virtio_free_queues(hw);
+	}
+
 	/* Tell the host we've noticed this device. */
 	vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);
 
@@ -1565,7 +1559,7 @@
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		rxvq = dev->data->rx_queues[i];
 		/* Flush the old packets */
-		virtqueue_flush(rxvq->vq);
+		virtqueue_rxvq_flush(rxvq->vq);
 		virtqueue_notify(rxvq->vq);
 	}
 
@@ -1597,12 +1591,15 @@
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		struct virtnet_rx *rxvq = dev->data->rx_queues[i];
 
+		if (rxvq == NULL || rxvq->vq == NULL)
+			continue;
+
 		PMD_INIT_LOG(DEBUG,
 			     "Before freeing rxq[%d] used and unused buf", i);
 		VIRTQUEUE_DUMP(rxvq->vq);
 
 		PMD_INIT_LOG(DEBUG, "rx_queues[%d]=%p", i, rxvq);
-		while ((buf = virtqueue_detatch_unused(rxvq->vq)) != NULL) {
+		while ((buf = virtqueue_detach_unused(rxvq->vq)) != NULL) {
 			rte_pktmbuf_free(buf);
 			mbuf_num++;
 		}
@@ -1616,13 +1613,16 @@
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
 		struct virtnet_tx *txvq = dev->data->tx_queues[i];
 
+		if (txvq == NULL || txvq->vq == NULL)
+			continue;
+
 		PMD_INIT_LOG(DEBUG,
 			     "Before freeing txq[%d] used and unused bufs",
 			     i);
 		VIRTQUEUE_DUMP(txvq->vq);
 
 		mbuf_num = 0;
-		while ((buf = virtqueue_detatch_unused(txvq->vq)) != NULL) {
+		while ((buf = virtqueue_detach_unused(txvq->vq)) != NULL) {
 			rte_pktmbuf_free(buf);
 			mbuf_num++;
 		}
diff -Nru dpdk-16.11.4/drivers/net/virtio/virtio_rxtx.c dpdk-16.11.6/drivers/net/virtio/virtio_rxtx.c
--- dpdk-16.11.4/drivers/net/virtio/virtio_rxtx.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/virtio/virtio_rxtx.c	2018-04-19 15:01:06.000000000 +0100
@@ -61,6 +61,7 @@
 #include "virtio_pci.h"
 #include "virtqueue.h"
 #include "virtio_rxtx.h"
+#include "virtio_rxtx_simple.h"
 
 #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
 #define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
@@ -458,6 +459,8 @@
 			vq->vq_ring.desc[desc_idx].flags =
 				VRING_DESC_F_WRITE;
 		}
+
+		virtio_rxq_vec_setup(rxvq);
 	}
 
 	memset(&rxvq->fake_mbuf, 0, sizeof(rxvq->fake_mbuf));
@@ -467,30 +470,31 @@
 			&rxvq->fake_mbuf;
 	}
 
-	while (!virtqueue_full(vq)) {
-		m = rte_mbuf_raw_alloc(rxvq->mpool);
-		if (m == NULL)
-			break;
+	if (hw->use_simple_rxtx) {
+		while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
+			virtio_rxq_rearm_vec(rxvq);
+			nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
+		}
+	} else {
+		while (!virtqueue_full(vq)) {
+			m = rte_mbuf_raw_alloc(rxvq->mpool);
+			if (m == NULL)
+				break;
 
-		/* Enqueue allocated buffers */
-		if (hw->use_simple_rxtx)
-			error = virtqueue_enqueue_recv_refill_simple(vq, m);
-		else
+			/* Enqueue allocated buffers */
 			error = virtqueue_enqueue_recv_refill(vq, m);
-
-		if (error) {
-			rte_pktmbuf_free(m);
-			break;
+			if (error) {
+				rte_pktmbuf_free(m);
+				break;
+			}
+			nbufs++;
 		}
-		nbufs++;
-	}
 
-	vq_update_avail_idx(vq);
+		vq_update_avail_idx(vq);
+	}
 
 	PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);
 
-	virtio_rxq_vec_setup(rxvq);
-
 	VIRTQUEUE_DUMP(vq);
 
 	return 0;
@@ -506,7 +510,7 @@
 #if defined RTE_ARCH_X86
 	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE3))
 		use_simple_rxtx = 1;
-#elif defined RTE_ARCH_ARM64 || defined CONFIG_RTE_ARCH_ARM
+#elif defined RTE_ARCH_ARM64 || defined RTE_ARCH_ARM
 	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON))
 		use_simple_rxtx = 1;
 #endif
diff -Nru dpdk-16.11.4/drivers/net/virtio/virtio_rxtx.h dpdk-16.11.6/drivers/net/virtio/virtio_rxtx.h
--- dpdk-16.11.4/drivers/net/virtio/virtio_rxtx.h	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/virtio/virtio_rxtx.h	2018-04-19 15:01:06.000000000 +0100
@@ -88,7 +88,4 @@
 
 int virtio_rxq_vec_setup(struct virtnet_rx *rxvq);
 
-int virtqueue_enqueue_recv_refill_simple(struct virtqueue *vq,
-	struct rte_mbuf *m);
-
 #endif /* _VIRTIO_RXTX_H_ */
diff -Nru dpdk-16.11.4/drivers/net/virtio/virtio_rxtx_simple.c dpdk-16.11.6/drivers/net/virtio/virtio_rxtx_simple.c
--- dpdk-16.11.4/drivers/net/virtio/virtio_rxtx_simple.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/virtio/virtio_rxtx_simple.c	2018-04-19 15:01:06.000000000 +0100
@@ -57,34 +57,6 @@
 #pragma GCC diagnostic ignored "-Wcast-qual"
 #endif
 
-int __attribute__((cold))
-virtqueue_enqueue_recv_refill_simple(struct virtqueue *vq,
-	struct rte_mbuf *cookie)
-{
-	struct vq_desc_extra *dxp;
-	struct vring_desc *start_dp;
-	uint16_t desc_idx;
-
-	cookie->port = vq->rxq.port_id;
-
-	desc_idx = vq->vq_avail_idx & (vq->vq_nentries - 1);
-	dxp = &vq->vq_descx[desc_idx];
-	dxp->cookie = (void *)cookie;
-	vq->sw_ring[desc_idx] = cookie;
-
-	start_dp = vq->vq_ring.desc;
-	start_dp[desc_idx].addr =
-		VIRTIO_MBUF_ADDR(cookie, vq) +
-		RTE_PKTMBUF_HEADROOM - vq->hw->vtnet_hdr_size;
-	start_dp[desc_idx].len = cookie->buf_len -
-		RTE_PKTMBUF_HEADROOM + vq->hw->vtnet_hdr_size;
-
-	vq->vq_free_cnt--;
-	vq->vq_avail_idx++;
-
-	return 0;
-}
-
 uint16_t
 virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint16_t nb_pkts)
@@ -102,7 +74,7 @@
 	rte_compiler_barrier();
 
 	if (nb_used >= VIRTIO_TX_FREE_THRESH)
-		virtio_xmit_cleanup(vq);
+		virtio_xmit_cleanup_simple(vq);
 
 	nb_commit = nb_pkts = RTE_MIN((vq->vq_free_cnt >> 1), nb_pkts);
 	desc_idx = (uint16_t)(vq->vq_avail_idx & desc_idx_max);
diff -Nru dpdk-16.11.4/drivers/net/virtio/virtio_rxtx_simple.h dpdk-16.11.6/drivers/net/virtio/virtio_rxtx_simple.h
--- dpdk-16.11.4/drivers/net/virtio/virtio_rxtx_simple.h	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/virtio/virtio_rxtx_simple.h	2018-04-19 15:01:06.000000000 +0100
@@ -89,7 +89,7 @@
 #define VIRTIO_TX_FREE_NR 32
 /* TODO: vq->tx_free_cnt could mean num of free slots so we could avoid shift */
 static inline void
-virtio_xmit_cleanup(struct virtqueue *vq)
+virtio_xmit_cleanup_simple(struct virtqueue *vq)
 {
 	uint16_t i, desc_idx;
 	uint32_t nb_free = 0;
diff -Nru dpdk-16.11.4/drivers/net/virtio/virtio_user/virtio_user_dev.c dpdk-16.11.6/drivers/net/virtio/virtio_user/virtio_user_dev.c
--- dpdk-16.11.4/drivers/net/virtio/virtio_user/virtio_user_dev.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/virtio/virtio_user/virtio_user_dev.c	2018-04-19 15:01:06.000000000 +0100
@@ -142,6 +142,9 @@
 	uint64_t features;
 	int ret;
 
+	/* Do not check return as already done in init, or reset in stop */
+	vhost_user_sock(dev->vhostfd, VHOST_USER_SET_OWNER, NULL);
+
 	/* Step 0: tell vhost to create queues */
 	if (virtio_user_queue_setup(dev, virtio_user_create_queue) < 0)
 		goto error;
@@ -240,6 +243,7 @@
 		PMD_INIT_LOG(ERR, "backend set up fails");
 		return -1;
 	}
+
 	if (vhost_user_sock(dev->vhostfd, VHOST_USER_SET_OWNER, NULL) < 0) {
 		PMD_INIT_LOG(ERR, "set_owner fails: %s", strerror(errno));
 		return -1;
diff -Nru dpdk-16.11.4/drivers/net/virtio/virtqueue.c dpdk-16.11.6/drivers/net/virtio/virtqueue.c
--- dpdk-16.11.4/drivers/net/virtio/virtqueue.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/virtio/virtqueue.c	2018-04-19 15:01:06.000000000 +0100
@@ -37,6 +37,7 @@
 #include "virtqueue.h"
 #include "virtio_logs.h"
 #include "virtio_pci.h"
+#include "virtio_rxtx_simple.h"
 
 void
 virtqueue_disable_intr(struct virtqueue *vq)
@@ -55,26 +56,50 @@
  * 2) mbuf that hasn't been consued by backend.
  */
 struct rte_mbuf *
-virtqueue_detatch_unused(struct virtqueue *vq)
+virtqueue_detach_unused(struct virtqueue *vq)
 {
 	struct rte_mbuf *cookie;
-	int idx;
-
-	if (vq != NULL)
-		for (idx = 0; idx < vq->vq_nentries; idx++) {
+	struct virtio_hw *hw;
+	uint16_t start, end;
+	int type, idx;
+
+	if (vq == NULL)
+		return NULL;
+
+	hw = vq->hw;
+	type = virtio_get_queue_type(hw, vq->vq_queue_index);
+	start = vq->vq_avail_idx & (vq->vq_nentries - 1);
+	end = (vq->vq_avail_idx + vq->vq_free_cnt) & (vq->vq_nentries - 1);
+
+	for (idx = 0; idx < vq->vq_nentries; idx++) {
+		if (hw->use_simple_rxtx && type == VTNET_RQ) {
+			if (start <= end && idx >= start && idx < end)
+				continue;
+			if (start > end && (idx >= start || idx < end))
+				continue;
+			cookie = vq->sw_ring[idx];
+			if (cookie != NULL) {
+				vq->sw_ring[idx] = NULL;
+				return cookie;
+			}
+		} else {
 			cookie = vq->vq_descx[idx].cookie;
 			if (cookie != NULL) {
 				vq->vq_descx[idx].cookie = NULL;
 				return cookie;
 			}
 		}
+	}
+
 	return NULL;
 }
 
 /* Flush the elements in the used ring. */
 void
-virtqueue_flush(struct virtqueue *vq)
+virtqueue_rxvq_flush(struct virtqueue *vq)
 {
+	struct virtnet_rx *rxq = &vq->rxq;
+	struct virtio_hw *hw = vq->hw;
 	struct vring_used_elem *uep;
 	struct vq_desc_extra *dxp;
 	uint16_t used_idx, desc_idx;
@@ -85,13 +110,27 @@
 	for (i = 0; i < nb_used; i++) {
 		used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
 		uep = &vq->vq_ring.used->ring[used_idx];
-		desc_idx = (uint16_t)uep->id;
-		dxp = &vq->vq_descx[desc_idx];
-		if (dxp->cookie != NULL) {
-			rte_pktmbuf_free(dxp->cookie);
-			dxp->cookie = NULL;
+		if (hw->use_simple_rxtx) {
+			desc_idx = used_idx;
+			rte_pktmbuf_free(vq->sw_ring[desc_idx]);
+			vq->vq_free_cnt++;
+		} else {
+			desc_idx = (uint16_t)uep->id;
+			dxp = &vq->vq_descx[desc_idx];
+			if (dxp->cookie != NULL) {
+				rte_pktmbuf_free(dxp->cookie);
+				dxp->cookie = NULL;
+			}
+			vq_ring_free_chain(vq, desc_idx);
 		}
 		vq->vq_used_cons_idx++;
-		vq_ring_free_chain(vq, desc_idx);
+	}
+
+	if (hw->use_simple_rxtx) {
+		while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
+			virtio_rxq_rearm_vec(rxq);
+			if (virtqueue_kick_prepare(vq))
+				virtqueue_notify(vq);
+		}
 	}
 }
diff -Nru dpdk-16.11.4/drivers/net/virtio/virtqueue.h dpdk-16.11.6/drivers/net/virtio/virtqueue.h
--- dpdk-16.11.4/drivers/net/virtio/virtqueue.h	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/virtio/virtqueue.h	2018-04-19 15:01:06.000000000 +0100
@@ -288,10 +288,10 @@
 /**
  *  Get all mbufs to be freed.
  */
-struct rte_mbuf *virtqueue_detatch_unused(struct virtqueue *vq);
+struct rte_mbuf *virtqueue_detach_unused(struct virtqueue *vq);
 
 /* Flush the elements in the used ring. */
-void virtqueue_flush(struct virtqueue *vq);
+void virtqueue_rxvq_flush(struct virtqueue *vq);
 
 static inline int
 virtqueue_full(const struct virtqueue *vq)
@@ -299,6 +299,17 @@
 	return vq->vq_free_cnt == 0;
 }
 
+static inline int
+virtio_get_queue_type(struct virtio_hw *hw, uint16_t vtpci_queue_idx)
+{
+	if (vtpci_queue_idx == hw->max_queue_pairs * 2)
+		return VTNET_CQ;
+	else if (vtpci_queue_idx % 2 == 0)
+		return VTNET_RQ;
+	else
+		return VTNET_TQ;
+}
+
 #define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))
 
 void vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx);
diff -Nru dpdk-16.11.4/drivers/net/vmxnet3/vmxnet3_ethdev.c dpdk-16.11.6/drivers/net/vmxnet3/vmxnet3_ethdev.c
--- dpdk-16.11.4/drivers/net/vmxnet3/vmxnet3_ethdev.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/vmxnet3/vmxnet3_ethdev.c	2018-04-19 15:01:06.000000000 +0100
@@ -790,7 +790,7 @@
 		link.link_status = ETH_LINK_UP;
 		link.link_duplex = ETH_LINK_FULL_DUPLEX;
 		link.link_speed = ETH_SPEED_NUM_10G;
-		link.link_autoneg = ETH_LINK_SPEED_FIXED;
+		link.link_autoneg = ETH_LINK_AUTONEG;
 	}
 
 	vmxnet3_dev_atomic_write_link_status(dev, &link);
diff -Nru dpdk-16.11.4/drivers/net/xenvirt/virtqueue.h dpdk-16.11.6/drivers/net/xenvirt/virtqueue.h
--- dpdk-16.11.4/drivers/net/xenvirt/virtqueue.h	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/drivers/net/xenvirt/virtqueue.h	2018-04-19 15:01:06.000000000 +0100
@@ -121,7 +121,7 @@
 /**
  *  Get all mbufs to be freed.
  */
-struct rte_mbuf * virtqueue_detatch_unused(struct virtqueue *vq);
+struct rte_mbuf * virtqueue_detach_unused(struct virtqueue *vq);
 
 static inline int __attribute__((always_inline))
 virtqueue_full(const struct virtqueue *vq)
diff -Nru dpdk-16.11.4/examples/bond/main.c dpdk-16.11.6/examples/bond/main.c
--- dpdk-16.11.4/examples/bond/main.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/examples/bond/main.c	2018-04-19 15:01:06.000000000 +0100
@@ -437,6 +437,11 @@
 				(BOND_IP_3 << 16) | (BOND_IP_4 << 24);
 
 	created_pkt = rte_pktmbuf_alloc(mbuf_pool);
+	if (created_pkt == NULL) {
+		cmdline_printf(cl, "Failed to allocate mbuf\n");
+		return;
+	}
+
 	pkt_size = sizeof(struct ether_hdr) + sizeof(struct arp_hdr);
 	created_pkt->data_len = pkt_size;
 	created_pkt->pkt_len = pkt_size;
diff -Nru dpdk-16.11.4/examples/exception_path/main.c dpdk-16.11.6/examples/exception_path/main.c
--- dpdk-16.11.4/examples/exception_path/main.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/examples/exception_path/main.c	2018-04-19 15:01:06.000000000 +0100
@@ -141,7 +141,7 @@
 	uint64_t rx;
 	uint64_t tx;
 	uint64_t dropped;
-};
+} __rte_cache_aligned;
 
 /* Array of lcore-specific stats */
 static struct stats lcore_stats[RTE_MAX_LCORE];
diff -Nru dpdk-16.11.4/examples/ip_pipeline/init.c dpdk-16.11.6/examples/ip_pipeline/init.c
--- dpdk-16.11.4/examples/ip_pipeline/init.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/examples/ip_pipeline/init.c	2018-04-19 15:01:06.000000000 +0100
@@ -1701,7 +1701,7 @@
 		data->ptype = ptype;
 
 		data->timer_period = (rte_get_tsc_hz() *
-			params->timer_period) / 100;
+			params->timer_period) / 1000;
 	}
 }
 
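
Side note (illustrative, not part of the debdiff): the corrected divisor above implies
the configured timer_period is expressed in milliseconds, so converting it to TSC
cycles needs a divide by 1000 rather than 100. A hypothetical helper showing the same
conversion:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper, not from the DPDK sources */
static uint64_t period_ms_to_tsc(uint64_t tsc_hz, uint64_t period_ms)
{
	return (tsc_hz * period_ms) / 1000;
}

int main(void)
{
	/* 2 GHz TSC, 10 ms period -> 20000000 cycles */
	printf("%llu\n",
	       (unsigned long long)period_ms_to_tsc(2000000000ULL, 10));
	return 0;
}
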
diff -Nru dpdk-16.11.4/examples/ipsec-secgw/ipsec-secgw.c dpdk-16.11.6/examples/ipsec-secgw/ipsec-secgw.c
--- dpdk-16.11.4/examples/ipsec-secgw/ipsec-secgw.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/examples/ipsec-secgw/ipsec-secgw.c	2018-04-19 15:01:06.000000000 +0100
@@ -409,7 +409,8 @@
 		}
 		/* Only check SPI match for processed IPSec packets */
 		sa_idx = ip->res[i] & PROTECT_MASK;
-		if (sa_idx == 0 || !inbound_sa_check(sa, m, sa_idx)) {
+		if (sa_idx >= IPSEC_SA_MAX_ENTRIES ||
+				!inbound_sa_check(sa, m, sa_idx)) {
 			rte_pktmbuf_free(m);
 			continue;
 		}
@@ -474,9 +475,9 @@
 	for (i = 0; i < ip->num; i++) {
 		m = ip->pkts[i];
 		sa_idx = ip->res[i] & PROTECT_MASK;
-		if ((ip->res[i] == 0) || (ip->res[i] & DISCARD))
+		if (ip->res[i] & DISCARD)
 			rte_pktmbuf_free(m);
-		else if (sa_idx != 0) {
+		else if (sa_idx < IPSEC_SA_MAX_ENTRIES) {
 			ipsec->res[ipsec->num] = sa_idx;
 			ipsec->pkts[ipsec->num++] = m;
 		} else /* BYPASS */
diff -Nru dpdk-16.11.4/examples/ipsec-secgw/sa.c dpdk-16.11.6/examples/ipsec-secgw/sa.c
--- dpdk-16.11.4/examples/ipsec-secgw/sa.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/examples/ipsec-secgw/sa.c	2018-04-19 15:01:06.000000000 +0100
@@ -232,6 +232,8 @@
 	APP_CHECK_TOKEN_IS_NUM(tokens, 1, status);
 	if (status->status < 0)
 		return;
+	if (atoi(tokens[1]) == INVALID_SPI)
+		return;
 	rule->spi = atoi(tokens[1]);
 
 	for (ti = 2; ti < n_tokens; ti++) {
diff -Nru dpdk-16.11.4/examples/l3fwd-power/main.c dpdk-16.11.6/examples/l3fwd-power/main.c
--- dpdk-16.11.4/examples/l3fwd-power/main.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/examples/l3fwd-power/main.c	2018-04-19 15:01:06.000000000 +0100
@@ -83,8 +83,6 @@
 
 #define MIN_ZERO_POLL_COUNT 10
 
-/* around 100ms at 2 Ghz */
-#define TIMER_RESOLUTION_CYCLES           200000000ULL
 /* 100 ms interval */
 #define TIMER_NUMBER_PER_SECOND           10
 /* 100000 us */
@@ -824,7 +822,7 @@
 {
 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
 	unsigned lcore_id;
-	uint64_t prev_tsc, diff_tsc, cur_tsc;
+	uint64_t prev_tsc, diff_tsc, cur_tsc, tim_res_tsc, hz;
 	uint64_t prev_tsc_power = 0, cur_tsc_power, diff_tsc_power;
 	int i, j, nb_rx;
 	uint8_t portid, queueid;
@@ -838,6 +836,8 @@
 	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
 
 	prev_tsc = 0;
+	hz = rte_get_timer_hz();
+	tim_res_tsc = hz/TIMER_NUMBER_PER_SECOND;
 
 	lcore_id = rte_lcore_id();
 	qconf = &lcore_conf[lcore_id];
@@ -883,7 +883,7 @@
 		}
 
 		diff_tsc_power = cur_tsc_power - prev_tsc_power;
-		if (diff_tsc_power > TIMER_RESOLUTION_CYCLES) {
+		if (diff_tsc_power > tim_res_tsc) {
 			rte_timer_manage();
 			prev_tsc_power = cur_tsc_power;
 		}
@@ -999,9 +999,11 @@
 					turn_on_intr(qconf);
 					sleep_until_rx_interrupt(
 						qconf->n_rx_queue);
+					/**
+					 * start receiving packets immediately
+					 */
+					goto start_rx;
 				}
-				/* start receiving packets immediately */
-				goto start_rx;
 			}
 			stats[lcore_id].sleep_time += lcore_idle_hint;
 		}
diff -Nru dpdk-16.11.4/examples/vhost/main.c dpdk-16.11.6/examples/vhost/main.c
--- dpdk-16.11.4/examples/vhost/main.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/examples/vhost/main.c	2018-04-19 15:01:06.000000000 +0100
@@ -277,12 +277,6 @@
 	/* The max pool number from dev_info will be used to validate the pool number specified in cmd line */
 	rte_eth_dev_info_get (port, &dev_info);
 
-	if (dev_info.max_rx_queues > MAX_QUEUES) {
-		rte_exit(EXIT_FAILURE,
-			"please define MAX_QUEUES no less than %u in %s\n",
-			dev_info.max_rx_queues, __FILE__);
-	}
-
 	rxconf = &dev_info.default_rxconf;
 	txconf = &dev_info.default_txconf;
 	rxconf->rx_drop_en = 1;
@@ -954,7 +948,8 @@
 		struct vhost_dev *vdev2;
 
 		TAILQ_FOREACH(vdev2, &vhost_dev_list, global_vdev_entry) {
-			virtio_xmit(vdev2, vdev, m);
+			if (vdev2 != vdev)
+				virtio_xmit(vdev2, vdev, m);
 		}
 		goto queue2nic;
 	}
diff -Nru dpdk-16.11.4/lib/librte_eal/bsdapp/contigmem/contigmem.c dpdk-16.11.6/lib/librte_eal/bsdapp/contigmem/contigmem.c
--- dpdk-16.11.4/lib/librte_eal/bsdapp/contigmem/contigmem.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/lib/librte_eal/bsdapp/contigmem/contigmem.c	2018-04-19 15:01:06.000000000 +0100
@@ -45,6 +45,7 @@
 #include <sys/rwlock.h>
 #include <sys/systm.h>
 #include <sys/sysctl.h>
+#include <sys/vmmeter.h>
 
 #include <machine/bus.h>
 
diff -Nru dpdk-16.11.4/lib/librte_eal/bsdapp/eal/eal_memory.c dpdk-16.11.6/lib/librte_eal/bsdapp/eal/eal_memory.c
--- dpdk-16.11.4/lib/librte_eal/bsdapp/eal/eal_memory.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/lib/librte_eal/bsdapp/eal/eal_memory.c	2018-04-19 15:01:06.000000000 +0100
@@ -150,7 +150,7 @@
 	/* Map the shared hugepage_info into the process address spaces */
 	hpi = mmap(NULL, sizeof(struct hugepage_info), PROT_READ, MAP_PRIVATE,
 			fd_hugepage_info, 0);
-	if (hpi == NULL) {
+	if (hpi == MAP_FAILED) {
 		RTE_LOG(ERR, EAL, "Could not mmap %s\n", eal_hugepage_info_path());
 		goto error;
 	}
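
Both mmap() fixes above (szedata2 and the BSD EAL) address the same class of bug:
mmap() reports failure with MAP_FAILED ((void *)-1), never NULL, so a NULL check can
let a failed mapping slip through. A minimal sketch of the corrected pattern, for
illustration only (the helper name is hypothetical):

#include <stdio.h>
#include <sys/mman.h>

/* Hypothetical helper, not from the DPDK sources */
static void *map_file_ro(int fd, size_t len)
{
	void *p = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);

	if (p == MAP_FAILED) {	/* NULL is never returned on failure */
		perror("mmap");
		return NULL;
	}
	return p;
}
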
diff -Nru dpdk-16.11.4/lib/librte_eal/common/eal_common_memzone.c dpdk-16.11.6/lib/librte_eal/common/eal_common_memzone.c
--- dpdk-16.11.4/lib/librte_eal/common/eal_common_memzone.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/lib/librte_eal/common/eal_common_memzone.c	2018-04-19 15:01:06.000000000 +0100
@@ -236,7 +236,7 @@
 		return NULL;
 	}
 
-	const struct malloc_elem *elem = malloc_elem_from_data(mz_addr);
+	struct malloc_elem *elem = malloc_elem_from_data(mz_addr);
 
 	/* fill the zone in config */
 	mz = get_next_free_memzone();
@@ -244,6 +244,7 @@
 	if (mz == NULL) {
 		RTE_LOG(ERR, EAL, "%s(): Cannot find free memzone but there is room "
 				"in config!\n", __func__);
+		malloc_elem_free(elem);
 		rte_errno = ENOSPC;
 		return NULL;
 	}
diff -Nru dpdk-16.11.4/lib/librte_eal/common/eal_common_pci_uio.c dpdk-16.11.6/lib/librte_eal/common/eal_common_pci_uio.c
--- dpdk-16.11.4/lib/librte_eal/common/eal_common_pci_uio.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/lib/librte_eal/common/eal_common_pci_uio.c	2018-04-19 15:01:06.000000000 +0100
@@ -117,7 +117,6 @@
 
 	dev->intr_handle.fd = -1;
 	dev->intr_handle.uio_cfg_fd = -1;
-	dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
 
 	/* secondary processes - use already recorded details */
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
diff -Nru dpdk-16.11.4/lib/librte_eal/common/include/arch/ppc_64/rte_atomic.h dpdk-16.11.6/lib/librte_eal/common/include/arch/ppc_64/rte_atomic.h
--- dpdk-16.11.4/lib/librte_eal/common/include/arch/ppc_64/rte_atomic.h	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/lib/librte_eal/common/include/arch/ppc_64/rte_atomic.h	2018-04-19 15:01:06.000000000 +0100
@@ -64,9 +64,9 @@
  * occur before the STORE operations generated after.
  */
 #ifdef RTE_ARCH_64
-#define	rte_wmb() {asm volatile("lwsync" : : : "memory"); }
+#define	rte_wmb() asm volatile("lwsync" : : : "memory")
 #else
-#define	rte_wmb() {asm volatile("sync" : : : "memory"); }
+#define	rte_wmb() asm volatile("sync" : : : "memory")
 #endif
 
 /**
@@ -76,9 +76,9 @@
  * occur before the LOAD operations generated after.
  */
 #ifdef RTE_ARCH_64
-#define	rte_rmb() {asm volatile("lwsync" : : : "memory"); }
+#define	rte_rmb() asm volatile("lwsync" : : : "memory")
 #else
-#define	rte_rmb() {asm volatile("sync" : : : "memory"); }
+#define	rte_rmb() asm volatile("sync" : : : "memory")
 #endif
 
 #define rte_smp_mb() rte_mb()
diff -Nru dpdk-16.11.4/lib/librte_eal/common/include/arch/x86/rte_atomic.h dpdk-16.11.6/lib/librte_eal/common/include/arch/x86/rte_atomic.h
--- dpdk-16.11.4/lib/librte_eal/common/include/arch/x86/rte_atomic.h	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/lib/librte_eal/common/include/arch/x86/rte_atomic.h	2018-04-19 15:01:06.000000000 +0100
@@ -55,12 +55,52 @@
 
 #define	rte_rmb() _mm_lfence()
 
-#define rte_smp_mb() rte_mb()
-
 #define rte_smp_wmb() rte_compiler_barrier()
 
 #define rte_smp_rmb() rte_compiler_barrier()
 
+/*
+ * From Intel Software Development Manual; Vol 3;
+ * 8.2.2 Memory Ordering in P6 and More Recent Processor Families:
+ * ...
+ * . Reads are not reordered with other reads.
+ * . Writes are not reordered with older reads.
+ * . Writes to memory are not reordered with other writes,
+ *   with the following exceptions:
+ *   . streaming stores (writes) executed with the non-temporal move
+ *     instructions (MOVNTI, MOVNTQ, MOVNTDQ, MOVNTPS, and MOVNTPD); and
+ *   . string operations (see Section 8.2.4.1).
+ *  ...
+ * . Reads may be reordered with older writes to different locations but not
+ * with older writes to the same location.
+ * . Reads or writes cannot be reordered with I/O instructions,
+ * locked instructions, or serializing instructions.
+ * . Reads cannot pass earlier LFENCE and MFENCE instructions.
+ * . Writes ... cannot pass earlier LFENCE, SFENCE, and MFENCE instructions.
+ * . LFENCE instructions cannot pass earlier reads.
+ * . SFENCE instructions cannot pass earlier writes ...
+ * . MFENCE instructions cannot pass earlier reads, writes ...
+ *
+ * As pointed by Java guys, that makes possible to use lock-prefixed
+ * instructions to get the same effect as mfence and on most modern HW
+ * that gives a better perfomance then using mfence:
+ * https://shipilev.net/blog/2014/on-the-fence-with-dependencies/
+ * Basic idea is to use lock prefixed add with some dummy memory location
+ * as the destination. From their experiments 128B(2 cache lines) below
+ * current stack pointer looks like a good candidate.
+ * So below we use that techinque for rte_smp_mb() implementation.
+ */
+
+static inline void __attribute__((always_inline))
+rte_smp_mb(void)
+{
+#ifdef RTE_ARCH_I686
+	asm volatile("lock addl $0, -128(%%esp); " ::: "memory");
+#else
+	asm volatile("lock addl $0, -128(%%rsp); " ::: "memory");
+#endif
+}
+
 /*------------------------- 16 bit atomic operations -------------------------*/
 
 #ifndef RTE_FORCE_INTRINSICS
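
For illustration only (not part of the debdiff): the new rte_smp_mb() above replaces
mfence with a lock-prefixed add to a dummy stack slot, which acts as a full barrier on
x86 and is typically cheaper. A stand-alone sketch of the same idea, assuming GCC/clang
extended asm on x86:

/* Minimal sketch of the lock-prefixed full barrier from the hunk above */
static inline void full_barrier(void)
{
#ifdef __i386__
	asm volatile("lock addl $0, -128(%%esp)" ::: "memory");
#else
	asm volatile("lock addl $0, -128(%%rsp)" ::: "memory");
#endif
}

int main(void)
{
	full_barrier();	/* orders earlier loads/stores before later ones */
	return 0;
}
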
diff -Nru dpdk-16.11.4/lib/librte_eal/common/include/rte_debug.h dpdk-16.11.6/lib/librte_eal/common/include/rte_debug.h
--- dpdk-16.11.4/lib/librte_eal/common/include/rte_debug.h	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/lib/librte_eal/common/include/rte_debug.h	2018-04-19 15:01:06.000000000 +0100
@@ -86,7 +86,7 @@
 #endif
 #define	RTE_VERIFY(exp)	do {                                                  \
 	if (unlikely(!(exp)))                                                           \
-		rte_panic("line %d\tassert \"" #exp "\" failed\n", __LINE__); \
+		rte_panic("line %d\tassert \"%s\" failed\n", __LINE__, #exp); \
 } while (0)
 
 /*
diff -Nru dpdk-16.11.4/lib/librte_eal/common/include/rte_version.h dpdk-16.11.6/lib/librte_eal/common/include/rte_version.h
--- dpdk-16.11.4/lib/librte_eal/common/include/rte_version.h	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/lib/librte_eal/common/include/rte_version.h	2018-04-19 15:01:06.000000000 +0100
@@ -66,7 +66,7 @@
 /**
  * Patch level number i.e. the z in yy.mm.z
  */
-#define RTE_VER_MINOR 4
+#define RTE_VER_MINOR 6
 
 /**
  * Extra string to be appended to version number
diff -Nru dpdk-16.11.4/lib/librte_eal/common/malloc_elem.c dpdk-16.11.6/lib/librte_eal/common/malloc_elem.c
--- dpdk-16.11.4/lib/librte_eal/common/malloc_elem.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/lib/librte_eal/common/malloc_elem.c	2018-04-19 15:01:06.000000000 +0100
@@ -98,6 +98,7 @@
 	if ((new_data_start & bmask) != ((end_pt - 1) & bmask)) {
 		end_pt = RTE_ALIGN_FLOOR(end_pt, bound);
 		new_data_start = RTE_ALIGN_FLOOR((end_pt - size), align);
+		end_pt = new_data_start + size;
 		if (((end_pt - 1) & bmask) != (new_data_start & bmask))
 			return NULL;
 	}
diff -Nru dpdk-16.11.4/lib/librte_eal/common/malloc_heap.c dpdk-16.11.6/lib/librte_eal/common/malloc_heap.c
--- dpdk-16.11.4/lib/librte_eal/common/malloc_heap.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/lib/librte_eal/common/malloc_heap.c	2018-04-19 15:01:06.000000000 +0100
@@ -178,12 +178,14 @@
  * Function to retrieve data for heap on given socket
  */
 int
-malloc_heap_get_stats(const struct malloc_heap *heap,
+malloc_heap_get_stats(struct malloc_heap *heap,
 		struct rte_malloc_socket_stats *socket_stats)
 {
 	size_t idx;
 	struct malloc_elem *elem;
 
+	rte_spinlock_lock(&heap->lock);
+
 	/* Initialise variables for heap */
 	socket_stats->free_count = 0;
 	socket_stats->heap_freesz_bytes = 0;
@@ -205,6 +207,8 @@
 	socket_stats->heap_allocsz_bytes = (socket_stats->heap_totalsz_bytes -
 			socket_stats->heap_freesz_bytes);
 	socket_stats->alloc_count = heap->alloc_count;
+
+	rte_spinlock_unlock(&heap->lock);
 	return 0;
 }
 
diff -Nru dpdk-16.11.4/lib/librte_eal/common/malloc_heap.h dpdk-16.11.6/lib/librte_eal/common/malloc_heap.h
--- dpdk-16.11.4/lib/librte_eal/common/malloc_heap.h	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/lib/librte_eal/common/malloc_heap.h	2018-04-19 15:01:06.000000000 +0100
@@ -57,7 +57,7 @@
 		unsigned flags, size_t align, size_t bound);
 
 int
-malloc_heap_get_stats(const struct malloc_heap *heap,
+malloc_heap_get_stats(struct malloc_heap *heap,
 		struct rte_malloc_socket_stats *socket_stats);
 
 int
diff -Nru dpdk-16.11.4/lib/librte_eal/common/rte_keepalive.c dpdk-16.11.6/lib/librte_eal/common/rte_keepalive.c
--- dpdk-16.11.4/lib/librte_eal/common/rte_keepalive.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/lib/librte_eal/common/rte_keepalive.c	2018-04-19 15:01:06.000000000 +0100
@@ -42,8 +42,12 @@
 
 struct rte_keepalive {
 	/** Core Liveness. */
-	enum rte_keepalive_state __rte_cache_aligned state_flags[
-		RTE_KEEPALIVE_MAXCORES];
+	struct {
+		/*
+		 * Each element must be cache aligned to prevent false sharing.
+		 */
+		enum rte_keepalive_state core_state __rte_cache_aligned;
+	} live_data[RTE_KEEPALIVE_MAXCORES];
 
 	/** Last-seen-alive timestamps */
 	uint64_t last_alive[RTE_KEEPALIVE_MAXCORES];
@@ -96,19 +100,22 @@
 		if (keepcfg->active_cores[idx_core] == 0)
 			continue;
 
-		switch (keepcfg->state_flags[idx_core]) {
+		switch (keepcfg->live_data[idx_core].core_state) {
 		case RTE_KA_STATE_UNUSED:
 			break;
 		case RTE_KA_STATE_ALIVE: /* Alive */
-			keepcfg->state_flags[idx_core] = RTE_KA_STATE_MISSING;
+			keepcfg->live_data[idx_core].core_state =
+			    RTE_KA_STATE_MISSING;
 			keepcfg->last_alive[idx_core] = rte_rdtsc();
 			break;
 		case RTE_KA_STATE_MISSING: /* MIA */
 			print_trace("Core MIA. ", keepcfg, idx_core);
-			keepcfg->state_flags[idx_core] = RTE_KA_STATE_DEAD;
+			keepcfg->live_data[idx_core].core_state =
+			    RTE_KA_STATE_DEAD;
 			break;
 		case RTE_KA_STATE_DEAD: /* Dead */
-			keepcfg->state_flags[idx_core] = RTE_KA_STATE_GONE;
+			keepcfg->live_data[idx_core].core_state =
+			    RTE_KA_STATE_GONE;
 			print_trace("Core died. ", keepcfg, idx_core);
 			if (keepcfg->callback)
 				keepcfg->callback(
@@ -119,7 +126,8 @@
 		case RTE_KA_STATE_GONE: /* Buried */
 			break;
 		case RTE_KA_STATE_DOZING: /* Core going idle */
-			keepcfg->state_flags[idx_core] = RTE_KA_STATE_SLEEP;
+			keepcfg->live_data[idx_core].core_state =
+			    RTE_KA_STATE_SLEEP;
 			keepcfg->last_alive[idx_core] = rte_rdtsc();
 			break;
 		case RTE_KA_STATE_SLEEP: /* Idled core */
@@ -129,7 +137,7 @@
 			keepcfg->relay_callback(
 				keepcfg->relay_callback_data,
 				idx_core,
-				keepcfg->state_flags[idx_core],
+				keepcfg->live_data[idx_core].core_state,
 				keepcfg->last_alive[idx_core]
 				);
 	}
@@ -173,11 +181,11 @@
 void
 rte_keepalive_mark_alive(struct rte_keepalive *keepcfg)
 {
-	keepcfg->state_flags[rte_lcore_id()] = RTE_KA_STATE_ALIVE;
+	keepcfg->live_data[rte_lcore_id()].core_state = RTE_KA_STATE_ALIVE;
 }
 
 void
 rte_keepalive_mark_sleep(struct rte_keepalive *keepcfg)
 {
-	keepcfg->state_flags[rte_lcore_id()] = RTE_KA_STATE_DOZING;
+	keepcfg->live_data[rte_lcore_id()].core_state = RTE_KA_STATE_DOZING;
 }
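
For illustration only (not part of the debdiff): the rte_keepalive change above wraps
each per-core state word in its own cache-line-aligned struct, so cores updating their
own slot no longer false-share a line with their neighbours. The same pattern in
isolation, assuming a 64-byte cache line:

#include <stdint.h>

#define CACHE_LINE_SIZE	64	/* assumption: typical x86 cache line */
#define MAX_CORES	128

/* One slot per core; the alignment pads each element to a full line */
struct core_slot {
	volatile uint32_t state;
} __attribute__((aligned(CACHE_LINE_SIZE)));

static struct core_slot live_data[MAX_CORES];	/* sizeof(struct core_slot) == 64 */

int main(void)
{
	live_data[0].state = 1;	/* touches only core 0's cache line */
	return 0;
}
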
diff -Nru dpdk-16.11.4/lib/librte_eal/linuxapp/eal/eal_pci.c dpdk-16.11.6/lib/librte_eal/linuxapp/eal/eal_pci.c
--- dpdk-16.11.4/lib/librte_eal/linuxapp/eal/eal_pci.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/lib/librte_eal/linuxapp/eal/eal_pci.c	2018-04-19 15:01:06.000000000 +0100
@@ -623,7 +623,6 @@
 	if (!found)
 		return -1;
 
-	dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
 	p->base = start;
 	RTE_LOG(DEBUG, EAL, "PCI Port IO found start=0x%x\n", start);
 
diff -Nru dpdk-16.11.4/lib/librte_eal/linuxapp/eal/eal_vfio.c dpdk-16.11.6/lib/librte_eal/linuxapp/eal/eal_vfio.c
--- dpdk-16.11.4/lib/librte_eal/linuxapp/eal/eal_vfio.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/lib/librte_eal/linuxapp/eal/eal_vfio.c	2018-04-19 15:01:06.000000000 +0100
@@ -50,12 +50,15 @@
 static struct vfio_config vfio_cfg;
 
 static int vfio_type1_dma_map(int);
+static int vfio_spapr_dma_map(int);
 static int vfio_noiommu_dma_map(int);
 
 /* IOMMU types we support */
 static const struct vfio_iommu_type iommu_types[] = {
 	/* x86 IOMMU, otherwise known as type 1 */
 	{ RTE_VFIO_TYPE1, "Type 1", &vfio_type1_dma_map},
+	/* ppc64 IOMMU, otherwise known as spapr */
+	{ RTE_VFIO_SPAPR, "sPAPR", &vfio_spapr_dma_map},
 	/* IOMMU-less mode */
 	{ RTE_VFIO_NOIOMMU, "No-IOMMU", &vfio_noiommu_dma_map},
 };
@@ -339,7 +342,7 @@
 int
 vfio_is_enabled(const char *modname)
 {
-	const int mod_available = rte_eal_check_module(modname);
+	const int mod_available = rte_eal_check_module(modname) > 0;
 	return vfio_cfg.vfio_enabled && mod_available;
 }
 
@@ -537,6 +540,93 @@
 	}
 
 	return 0;
+}
+
+static int
+vfio_spapr_dma_map(int vfio_container_fd)
+{
+	const struct rte_memseg *ms = rte_eal_get_physmem_layout();
+	int i, ret;
+
+	struct vfio_iommu_spapr_register_memory reg = {
+		.argsz = sizeof(reg),
+		.flags = 0
+	};
+	struct vfio_iommu_spapr_tce_info info = {
+		.argsz = sizeof(info),
+	};
+	struct vfio_iommu_spapr_tce_create create = {
+		.argsz = sizeof(create),
+	};
+	struct vfio_iommu_spapr_tce_remove remove = {
+		.argsz = sizeof(remove),
+	};
+
+	/* query spapr iommu info */
+	ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
+	if (ret) {
+		RTE_LOG(ERR, EAL, "  cannot get iommu info, "
+				"error %i (%s)\n", errno, strerror(errno));
+		return -1;
+	}
+
+	/* remove default DMA of 32 bit window */
+	remove.start_addr = info.dma32_window_start;
+	ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_REMOVE, &remove);
+	if (ret) {
+		RTE_LOG(ERR, EAL, "  cannot remove default DMA window, "
+				"error %i (%s)\n", errno, strerror(errno));
+		return -1;
+	}
+
+	/* calculate window size based on number of hugepages configured */
+	create.window_size = rte_eal_get_physmem_size();
+	create.page_shift = __builtin_ctzll(ms->hugepage_sz);
+	create.levels = 2;
+
+	ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_CREATE, &create);
+	if (ret) {
+		RTE_LOG(ERR, EAL, "  cannot create new DMA window, "
+				"error %i (%s)\n", errno, strerror(errno));
+		return -1;
+	}
+
+	/* map all DPDK segments for DMA. use 1:1 PA to IOVA mapping */
+	for (i = 0; i < RTE_MAX_MEMSEG; i++) {
+		struct vfio_iommu_type1_dma_map dma_map;
+
+		if (ms[i].addr == NULL)
+			break;
+
+		reg.vaddr = (uintptr_t) ms[i].addr;
+		reg.size = ms[i].len;
+		ret = ioctl(vfio_container_fd,
+			VFIO_IOMMU_SPAPR_REGISTER_MEMORY, &reg);
+		if (ret) {
+			RTE_LOG(ERR, EAL, "  cannot register vaddr for IOMMU, "
+				"error %i (%s)\n", errno, strerror(errno));
+			return -1;
+		}
+
+		memset(&dma_map, 0, sizeof(dma_map));
+		dma_map.argsz = sizeof(struct vfio_iommu_type1_dma_map);
+		dma_map.vaddr = ms[i].addr_64;
+		dma_map.size = ms[i].len;
+		dma_map.iova = ms[i].phys_addr;
+		dma_map.flags = VFIO_DMA_MAP_FLAG_READ |
+				 VFIO_DMA_MAP_FLAG_WRITE;
+
+		ret = ioctl(vfio_container_fd, VFIO_IOMMU_MAP_DMA, &dma_map);
+
+		if (ret) {
+			RTE_LOG(ERR, EAL, "  cannot set up DMA remapping, "
+				"error %i (%s)\n", errno, strerror(errno));
+			return -1;
+		}
+
+	}
+
+	return 0;
 }
 
 static int
diff -Nru dpdk-16.11.4/lib/librte_eal/linuxapp/eal/eal_vfio.h dpdk-16.11.6/lib/librte_eal/linuxapp/eal/eal_vfio.h
--- dpdk-16.11.4/lib/librte_eal/linuxapp/eal/eal_vfio.h	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/lib/librte_eal/linuxapp/eal/eal_vfio.h	2018-04-19 15:01:06.000000000 +0100
@@ -54,6 +54,31 @@
 
 #define RTE_VFIO_TYPE1 VFIO_TYPE1_IOMMU
 
+#ifndef VFIO_SPAPR_TCE_v2_IOMMU
+#define RTE_VFIO_SPAPR 7
+#define VFIO_IOMMU_SPAPR_REGISTER_MEMORY _IO(VFIO_TYPE, VFIO_BASE + 17)
+#define VFIO_IOMMU_SPAPR_TCE_CREATE _IO(VFIO_TYPE, VFIO_BASE + 19)
+#define VFIO_IOMMU_SPAPR_TCE_REMOVE _IO(VFIO_TYPE, VFIO_BASE + 20)
+struct vfio_iommu_spapr_register_memory {
+	uint32_t argsz;
+	uint32_t flags;
+	uint64_t vaddr;
+	uint64_t size;
+};
+struct vfio_iommu_spapr_tce_create {
+	uint32_t argsz;
+	uint32_t page_shift;
+	uint64_t window_size;
+	uint32_t levels;
+};
+struct vfio_iommu_spapr_tce_remove {
+	uint32_t argsz;
+	uint64_t start_addr;
+};
+#else
+#define RTE_VFIO_SPAPR VFIO_SPAPR_TCE_v2_IOMMU
+#endif
+
 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)
 #define RTE_VFIO_NOIOMMU 8
 #else
diff -Nru dpdk-16.11.4/lib/librte_eal/linuxapp/igb_uio/compat.h dpdk-16.11.6/lib/librte_eal/linuxapp/igb_uio/compat.h
--- dpdk-16.11.4/lib/librte_eal/linuxapp/igb_uio/compat.h	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/lib/librte_eal/linuxapp/igb_uio/compat.h	2018-04-19 15:01:06.000000000 +0100
@@ -123,3 +123,7 @@
 }
 
 #endif /* < 3.3.0 */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
+#define HAVE_ALLOC_IRQ_VECTORS 1
+#endif
diff -Nru dpdk-16.11.4/lib/librte_eal/linuxapp/igb_uio/igb_uio.c dpdk-16.11.6/lib/librte_eal/linuxapp/igb_uio/igb_uio.c
--- dpdk-16.11.4/lib/librte_eal/linuxapp/igb_uio/igb_uio.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/lib/librte_eal/linuxapp/igb_uio/igb_uio.c	2018-04-19 15:01:06.000000000 +0100
@@ -325,7 +325,9 @@
 igbuio_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
 {
 	struct rte_uio_pci_dev *udev;
+#ifndef HAVE_ALLOC_IRQ_VECTORS
 	struct msix_entry msix_entry;
+#endif
 	int err;
 
 	udev = kzalloc(sizeof(struct rte_uio_pci_dev), GFP_KERNEL);
@@ -379,6 +381,7 @@
 	switch (igbuio_intr_mode_preferred) {
 	case RTE_INTR_MODE_MSIX:
 		/* Only 1 msi-x vector needed */
+#ifndef HAVE_ALLOC_IRQ_VECTORS
 		msix_entry.entry = 0;
 		if (pci_enable_msix(dev, &msix_entry, 1) == 0) {
 			dev_dbg(&dev->dev, "using MSI-X");
@@ -386,6 +389,15 @@
 			udev->mode = RTE_INTR_MODE_MSIX;
 			break;
 		}
+#else
+		if (pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_MSIX) == 1) {
+			dev_dbg(&dev->dev, "using MSI-X");
+			udev->info.irq_flags = IRQF_NO_THREAD;
+			udev->info.irq = pci_irq_vector(dev, 0);
+			udev->mode = RTE_INTR_MODE_MSIX;
+			break;
+		}
+#endif
 		/* fall back to INTX */
 	case RTE_INTR_MODE_LEGACY:
 		if (pci_intx_mask_supported(dev)) {
@@ -429,8 +441,13 @@
 	sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp);
 fail_release_iomem:
 	igbuio_pci_release_iomem(&udev->info);
+#ifndef HAVE_ALLOC_IRQ_VECTORS
 	if (udev->mode == RTE_INTR_MODE_MSIX)
 		pci_disable_msix(udev->pdev);
+#else
+	if (udev->mode == RTE_INTR_MODE_MSIX)
+		pci_free_irq_vectors(udev->pdev);
+#endif
 	pci_disable_device(dev);
 fail_free:
 	kfree(udev);
@@ -446,8 +463,13 @@
 	sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp);
 	uio_unregister_device(&udev->info);
 	igbuio_pci_release_iomem(&udev->info);
+#ifndef HAVE_ALLOC_IRQ_VECTORS
 	if (udev->mode == RTE_INTR_MODE_MSIX)
 		pci_disable_msix(dev);
+#else
+	if (udev->mode == RTE_INTR_MODE_MSIX)
+		pci_free_irq_vectors(dev);
+#endif
 	pci_disable_device(dev);
 	pci_set_drvdata(dev, NULL);
 	kfree(udev);
diff -Nru dpdk-16.11.4/lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c dpdk-16.11.6/lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c
--- dpdk-16.11.4/lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c	2018-04-19 15:01:06.000000000 +0100
@@ -137,11 +137,20 @@
 static void igb_clean_all_rx_rings(struct igb_adapter *);
 static void igb_clean_tx_ring(struct igb_ring *);
 static void igb_set_rx_mode(struct net_device *);
+#ifdef HAVE_TIMER_SETUP
+static void igb_update_phy_info(struct timer_list *);
+static void igb_watchdog(struct timer_list *);
+#else
 static void igb_update_phy_info(unsigned long);
 static void igb_watchdog(unsigned long);
+#endif
 static void igb_watchdog_task(struct work_struct *);
 static void igb_dma_err_task(struct work_struct *);
+#ifdef HAVE_TIMER_SETUP
+static void igb_dma_err_timer(struct timer_list *);
+#else
 static void igb_dma_err_timer(unsigned long data);
+#endif
 static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
 static struct net_device_stats *igb_get_stats(struct net_device *);
 static int igb_change_mtu(struct net_device *, int);
@@ -2806,6 +2815,12 @@
 	/* Check if Media Autosense is enabled */
 	if (hw->mac.type == e1000_82580)
 		igb_init_mas(adapter);
+#ifdef HAVE_TIMER_SETUP
+	timer_setup(&adapter->watchdog_timer, &igb_watchdog, 0);
+	if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA)
+		timer_setup(&adapter->dma_err_timer, &igb_dma_err_timer, 0);
+	timer_setup(&adapter->phy_info_timer, &igb_update_phy_info, 0);
+#else
 	setup_timer(&adapter->watchdog_timer, &igb_watchdog,
 	            (unsigned long) adapter);
 	if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA)
@@ -2813,6 +2828,7 @@
 			    (unsigned long) adapter);
 	setup_timer(&adapter->phy_info_timer, &igb_update_phy_info,
 	            (unsigned long) adapter);
+#endif
 
 	INIT_WORK(&adapter->reset_task, igb_reset_task);
 	INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
@@ -4543,9 +4559,15 @@
 
 /* Need to wait a few seconds after link up to get diagnostic information from
  * the phy */
+#ifdef HAVE_TIMER_SETUP
+static void igb_update_phy_info(struct timer_list *t)
+{
+	struct igb_adapter *adapter = from_timer(adapter, t, phy_info_timer);
+#else
 static void igb_update_phy_info(unsigned long data)
 {
 	struct igb_adapter *adapter = (struct igb_adapter *) data;
+#endif
 	e1000_get_phy_info(&adapter->hw);
 }
 
@@ -4594,9 +4616,15 @@
  * igb_watchdog - Timer Call-back
  * @data: pointer to adapter cast into an unsigned long
  **/
+#ifdef HAVE_TIMER_SETUP
+static void igb_watchdog(struct timer_list *t)
+{
+	struct igb_adapter *adapter = from_timer(adapter, t, watchdog_timer);
+#else
 static void igb_watchdog(unsigned long data)
 {
 	struct igb_adapter *adapter = (struct igb_adapter *)data;
+#endif
 	/* Do the rest outside of interrupt context */
 	schedule_work(&adapter->watchdog_task);
 }
@@ -4854,9 +4882,15 @@
  * igb_dma_err_timer - Timer Call-back
  * @data: pointer to adapter cast into an unsigned long
  **/
+#ifdef HAVE_TIMER_SETUP
+static void igb_dma_err_timer(struct timer_list *t)
+{
+	struct igb_adapter *adapter = from_timer(adapter, t, dma_err_timer);
+#else
 static void igb_dma_err_timer(unsigned long data)
 {
 	struct igb_adapter *adapter = (struct igb_adapter *)data;
+#endif
 	/* Do the rest outside of interrupt context */
 	schedule_work(&adapter->dma_err_task);
 }
@@ -10051,6 +10085,12 @@
 		igb_init_mas(adapter);
 
 #ifdef NO_KNI
+#ifdef HAVE_TIMER_SETUP
+	timer_setup(&adapter->watchdog_timer, &igb_watchdog, 0);
+	if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA)
+		timer_setup(&adapter->dma_err_timer, &igb_dma_err_timer, 0);
+	timer_setup(&adapter->phy_info_timer, &igb_update_phy_info, 0);
+#else
 	setup_timer(&adapter->watchdog_timer, &igb_watchdog,
 	            (unsigned long) adapter);
 	if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA)
@@ -10058,6 +10098,7 @@
 			    (unsigned long) adapter);
 	setup_timer(&adapter->phy_info_timer, &igb_update_phy_info,
 	            (unsigned long) adapter);
+#endif
 
 	INIT_WORK(&adapter->reset_task, igb_reset_task);
 	INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
diff -Nru dpdk-16.11.4/lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h dpdk-16.11.6/lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h
--- dpdk-16.11.4/lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h	2018-04-19 15:01:06.000000000 +0100
@@ -3937,4 +3937,8 @@
 #define HAVE_PCI_ENABLE_MSIX
 #endif
 
+#if defined(timer_setup) && defined(from_timer)
+#define HAVE_TIMER_SETUP
+#endif
+
 #endif /* _KCOMPAT_H_ */
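
For reference, the igb timer hunks above all follow one compat pattern: when the kernel provides timer_setup() and from_timer() (the kcompat.h check just above defines HAVE_TIMER_SETUP in that case), the callback receives a struct timer_list pointer and recovers its container with from_timer(); on older kernels the legacy unsigned long cookie is kept. A minimal sketch of the pattern, with illustrative names (my_adapter, my_watchdog) rather than the driver's own:

#ifdef HAVE_TIMER_SETUP
static void my_watchdog(struct timer_list *t)
{
	/* recover the enclosing structure from its timer_list member */
	struct my_adapter *adapter = from_timer(adapter, t, watchdog_timer);
#else
static void my_watchdog(unsigned long data)
{
	/* legacy API: the cookie passed to setup_timer() is the adapter */
	struct my_adapter *adapter = (struct my_adapter *)data;
#endif
	schedule_work(&adapter->watchdog_task);
}

/* registration, e.g. at probe time */
#ifdef HAVE_TIMER_SETUP
	timer_setup(&adapter->watchdog_timer, &my_watchdog, 0);
#else
	setup_timer(&adapter->watchdog_timer, &my_watchdog,
	            (unsigned long)adapter);
#endif
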
diff -Nru dpdk-16.11.4/lib/librte_ether/rte_ethdev.c dpdk-16.11.6/lib/librte_ether/rte_ethdev.c
--- dpdk-16.11.4/lib/librte_ether/rte_ethdev.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/lib/librte_ether/rte_ethdev.c	2018-04-19 15:01:06.000000000 +0100
@@ -94,6 +94,7 @@
 	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
 	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
 	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
+	{"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
 	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
 	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
 	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
diff -Nru dpdk-16.11.4/lib/librte_ether/rte_ethdev.h dpdk-16.11.6/lib/librte_ether/rte_ethdev.h
--- dpdk-16.11.4/lib/librte_ether/rte_ethdev.h	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/lib/librte_ether/rte_ethdev.h	2018-04-19 15:01:06.000000000 +0100
@@ -262,17 +262,17 @@
 struct rte_eth_link {
 	uint32_t link_speed;        /**< ETH_SPEED_NUM_ */
 	uint16_t link_duplex  : 1;  /**< ETH_LINK_[HALF/FULL]_DUPLEX */
-	uint16_t link_autoneg : 1;  /**< ETH_LINK_SPEED_[AUTONEG/FIXED] */
+	uint16_t link_autoneg : 1;  /**< ETH_LINK_[AUTONEG/FIXED] */
 	uint16_t link_status  : 1;  /**< ETH_LINK_[DOWN/UP] */
 } __attribute__((aligned(8)));      /**< aligned for atomic64 read/write */
 
 /* Utility constants */
-#define ETH_LINK_HALF_DUPLEX    0 /**< Half-duplex connection. */
-#define ETH_LINK_FULL_DUPLEX    1 /**< Full-duplex connection. */
-#define ETH_LINK_DOWN           0 /**< Link is down. */
-#define ETH_LINK_UP             1 /**< Link is up. */
-#define ETH_LINK_FIXED          0 /**< No autonegotiation. */
-#define ETH_LINK_AUTONEG        1 /**< Autonegotiated. */
+#define ETH_LINK_HALF_DUPLEX 0 /**< Half-duplex connection (see link_duplex). */
+#define ETH_LINK_FULL_DUPLEX 1 /**< Full-duplex connection (see link_duplex). */
+#define ETH_LINK_DOWN        0 /**< Link is down (see link_status). */
+#define ETH_LINK_UP          1 /**< Link is up (see link_status). */
+#define ETH_LINK_FIXED       0 /**< No autonegotiation (see link_autoneg). */
+#define ETH_LINK_AUTONEG     1 /**< Autonegotiated (see link_autoneg). */
 
 /**
  * A structure used to configure the ring threshold registers of an RX/TX
@@ -1694,7 +1694,7 @@
 	enum rte_kernel_driver kdrv;    /**< Kernel driver passthrough */
 	int numa_node;  /**< NUMA node connection */
 	const char *drv_name;   /**< Driver name */
-};
+} __rte_cache_aligned;
 
 /** Device supports hotplug detach */
 #define RTE_ETH_DEV_DETACHABLE   0x0001
@@ -1965,7 +1965,7 @@
  *   the DMA memory allocated for the transmit descriptors of the ring.
  * @param tx_conf
  *   The pointer to the configuration data to be used for the transmit queue.
- *   NULL value is allowed, in which case default RX configuration
+ *   NULL value is allowed, in which case default TX configuration
  *   will be used.
  *   The *tx_conf* structure contains the following data:
  *   - The *tx_thresh* structure with the values of the Prefetch, Host, and
diff -Nru dpdk-16.11.4/lib/librte_lpm/rte_lpm.c dpdk-16.11.6/lib/librte_lpm/rte_lpm.c
--- dpdk-16.11.4/lib/librte_lpm/rte_lpm.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/lib/librte_lpm/rte_lpm.c	2018-04-19 15:01:06.000000000 +0100
@@ -908,7 +908,7 @@
 		 */
 
 		struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
-			{ .group_idx = (uint8_t)tbl8_group_index, },
+			.group_idx = (uint8_t)tbl8_group_index,
 			.valid = VALID,
 			.valid_group = 1,
 			.depth = 0,
@@ -954,7 +954,7 @@
 		 */
 
 		struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
-				{ .group_idx = (uint8_t)tbl8_group_index, },
+				.group_idx = (uint8_t)tbl8_group_index,
 				.valid = VALID,
 				.valid_group = 1,
 				.depth = 0,
@@ -1361,7 +1361,7 @@
 		 */
 
 		struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
-			{.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,},
+			.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
 			.valid = VALID,
 			.valid_group = 0,
 			.depth = sub_rule_depth,
@@ -1664,7 +1664,7 @@
 	} else if (tbl8_recycle_index > -1) {
 		/* Update tbl24 entry. */
 		struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
-			{ .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop, },
+			.next_hop = lpm->tbl8[tbl8_recycle_index].next_hop,
 			.valid = VALID,
 			.valid_group = 0,
 			.depth = lpm->tbl8[tbl8_recycle_index].depth,
diff -Nru dpdk-16.11.4/lib/librte_mbuf/rte_mbuf.h dpdk-16.11.6/lib/librte_mbuf/rte_mbuf.h
--- dpdk-16.11.4/lib/librte_mbuf/rte_mbuf.h	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/lib/librte_mbuf/rte_mbuf.h	2018-04-19 15:01:06.000000000 +0100
@@ -1233,13 +1233,14 @@
  * segment is added back into its original mempool.
  *
  * @param m
- *   The packet mbuf to be freed.
+ *   The packet mbuf to be freed. If NULL, the function does nothing.
  */
 static inline void rte_pktmbuf_free(struct rte_mbuf *m)
 {
 	struct rte_mbuf *m_next;
 
-	__rte_mbuf_sanity_check(m, 1);
+	if (m != NULL)
+		__rte_mbuf_sanity_check(m, 1);
 
 	while (m != NULL) {
 		m_next = m->next;
@@ -1361,12 +1362,10 @@
  */
 static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m)
 {
-	struct rte_mbuf *m2 = (struct rte_mbuf *)m;
-
 	__rte_mbuf_sanity_check(m, 1);
-	while (m2->next != NULL)
-		m2 = m2->next;
-	return m2;
+	while (m->next != NULL)
+		m = m->next;
+	return m;
 }
 
 /**
diff -Nru dpdk-16.11.4/lib/librte_pdump/rte_pdump.c dpdk-16.11.6/lib/librte_pdump/rte_pdump.c
--- dpdk-16.11.4/lib/librte_pdump/rte_pdump.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/lib/librte_pdump/rte_pdump.c	2018-04-19 15:01:06.000000000 +0100
@@ -582,7 +582,7 @@
 	if (ret != 0) {
 		RTE_LOG(ERR, PDUMP,
 			"Failed to create the pdump thread:%s, %s:%d\n",
-			strerror(errno), __func__, __LINE__);
+			strerror(ret), __func__, __LINE__);
 		return -1;
 	}
 	/* Set thread_name for aid in debugging. */
@@ -605,7 +605,7 @@
 	if (ret != 0) {
 		RTE_LOG(ERR, PDUMP,
 			"Failed to cancel the pdump thread:%s, %s:%d\n",
-			strerror(errno), __func__, __LINE__);
+			strerror(ret), __func__, __LINE__);
 		return -1;
 	}
 
diff -Nru dpdk-16.11.4/lib/librte_vhost/socket.c dpdk-16.11.6/lib/librte_vhost/socket.c
--- dpdk-16.11.4/lib/librte_vhost/socket.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/lib/librte_vhost/socket.c	2018-04-19 15:01:06.000000000 +0100
@@ -438,7 +438,7 @@
 
 	ret = pthread_create(&reconn_tid, NULL,
 			     vhost_user_client_reconnect, NULL);
-	if (ret < 0)
+	if (ret != 0)
 		RTE_LOG(ERR, VHOST_CONFIG, "failed to create reconnect thread");
 
 	return ret;
@@ -525,7 +525,7 @@
 	if ((flags & RTE_VHOST_USER_CLIENT) != 0) {
 		vsocket->reconnect = !(flags & RTE_VHOST_USER_NO_RECONNECT);
 		if (vsocket->reconnect && reconn_tid == 0) {
-			if (vhost_user_reconnect_init() < 0) {
+			if (vhost_user_reconnect_init() != 0) {
 				free(vsocket->path);
 				free(vsocket);
 				goto out;
diff -Nru dpdk-16.11.4/lib/librte_vhost/vhost.c dpdk-16.11.6/lib/librte_vhost/vhost.c
--- dpdk-16.11.4/lib/librte_vhost/vhost.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/lib/librte_vhost/vhost.c	2018-04-19 15:01:06.000000000 +0100
@@ -202,6 +202,8 @@
 	dev->virtqueue[virt_tx_q_idx] = virtqueue + VIRTIO_TXQ;
 
 	init_vring_queue_pair(dev, qp_idx);
+	rte_spinlock_init(&dev->virtqueue[virt_rx_q_idx]->access_lock);
+	rte_spinlock_init(&dev->virtqueue[virt_tx_q_idx]->access_lock);
 
 	dev->virt_qp_nb += 1;
 
diff -Nru dpdk-16.11.4/lib/librte_vhost/vhost.h dpdk-16.11.6/lib/librte_vhost/vhost.h
--- dpdk-16.11.4/lib/librte_vhost/vhost.h	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/lib/librte_vhost/vhost.h	2018-04-19 15:01:06.000000000 +0100
@@ -91,6 +91,8 @@
 
 	/* Backend value to determine if device should started/stopped */
 	int			backend;
+	rte_spinlock_t		access_lock;
+
 	/* Used to notify the guest (trigger interrupt) */
 	int			callfd;
 	/* Currently unused as polling mode is enabled */
@@ -223,19 +225,24 @@
 
 /* Convert guest physical Address to host virtual address */
 static inline uint64_t __attribute__((always_inline))
-gpa_to_vva(struct virtio_net *dev, uint64_t gpa)
+gpa_to_vva(struct virtio_net *dev, uint64_t gpa, uint64_t *len)
 {
-	struct virtio_memory_region *reg;
+	struct virtio_memory_region *r;
 	uint32_t i;
 
 	for (i = 0; i < dev->mem->nregions; i++) {
-		reg = &dev->mem->regions[i];
-		if (gpa >= reg->guest_phys_addr &&
-		    gpa <  reg->guest_phys_addr + reg->size) {
-			return gpa - reg->guest_phys_addr +
-			       reg->host_user_addr;
+		r = &dev->mem->regions[i];
+		if (gpa >= r->guest_phys_addr &&
+		    gpa <  r->guest_phys_addr + r->size) {
+
+			if (unlikely(*len > r->guest_phys_addr + r->size - gpa))
+				*len = r->guest_phys_addr + r->size - gpa;
+
+			return gpa - r->guest_phys_addr +
+			       r->host_user_addr;
 		}
 	}
+	*len = 0;
 
 	return 0;
 }
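
The new *len in/out parameter above is what lets callers detect guest buffers that are not contiguous in the host address space: gpa_to_vva() clamps *len to what remains of the matched region, and zeroes it when the address is unmapped, so callers loop until the requested size is covered. A minimal sketch of that loop, using a hypothetical helper name (copy_from_guest) rather than any function from the patch:

static int
copy_from_guest(struct virtio_net *dev, void *dst_buf,
		uint64_t gpa, uint64_t size)
{
	uint64_t dst = (uint64_t)(uintptr_t)dst_buf;
	uint64_t remain = size;
	uint64_t src, len;

	while (remain) {
		len = remain;				/* ask for what is left */
		src = gpa_to_vva(dev, gpa, &len);	/* clamped to one region */
		if (unlikely(!src || !len))
			return -1;			/* unmapped guest address */

		rte_memcpy((void *)(uintptr_t)dst,
			   (void *)(uintptr_t)src, len);

		remain -= len;
		gpa += len;
		dst += len;
	}

	return 0;
}

This is the same chunked-copy shape used further down by alloc_copy_ind_table() and by the virtio-net header copies in virtio_net.c.
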
diff -Nru dpdk-16.11.4/lib/librte_vhost/vhost_user.c dpdk-16.11.6/lib/librte_vhost/vhost_user.c
--- dpdk-16.11.4/lib/librte_vhost/vhost_user.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/lib/librte_vhost/vhost_user.c	2018-04-19 15:01:06.000000000 +0100
@@ -39,6 +39,7 @@
 #include <sys/mman.h>
 #include <sys/types.h>
 #include <sys/stat.h>
+#include <stdbool.h>
 #include <assert.h>
 #ifdef RTE_LIBRTE_VHOST_NUMA
 #include <numaif.h>
@@ -302,21 +303,26 @@
  * used to convert the ring addresses to our address space.
  */
 static uint64_t
-qva_to_vva(struct virtio_net *dev, uint64_t qva)
+qva_to_vva(struct virtio_net *dev, uint64_t qva, uint64_t *len)
 {
-	struct virtio_memory_region *reg;
+	struct virtio_memory_region *r;
 	uint32_t i;
 
 	/* Find the region where the address lives. */
 	for (i = 0; i < dev->mem->nregions; i++) {
-		reg = &dev->mem->regions[i];
+		r = &dev->mem->regions[i];
+
+		if (qva >= r->guest_user_addr &&
+		    qva <  r->guest_user_addr + r->size) {
+
+			if (unlikely(*len > r->guest_user_addr + r->size - qva))
+				*len = r->guest_user_addr + r->size - qva;
 
-		if (qva >= reg->guest_user_addr &&
-		    qva <  reg->guest_user_addr + reg->size) {
-			return qva - reg->guest_user_addr +
-			       reg->host_user_addr;
+			return qva - r->guest_user_addr +
+			       r->host_user_addr;
 		}
 	}
+	*len = 0;
 
 	return 0;
 }
@@ -326,9 +332,12 @@
  * This function then converts these to our address space.
  */
 static int
-vhost_user_set_vring_addr(struct virtio_net *dev, struct vhost_vring_addr *addr)
+vhost_user_set_vring_addr(struct virtio_net **pdev,
+						  struct vhost_vring_addr *addr)
 {
 	struct vhost_virtqueue *vq;
+	struct virtio_net *dev = *pdev;
+	uint64_t size, req_size;
 
 	if (dev->mem == NULL)
 		return -1;
@@ -337,30 +346,39 @@
 	vq = dev->virtqueue[addr->index];
 
 	/* The addresses are converted from QEMU virtual to Vhost virtual. */
+	req_size = sizeof(struct vring_desc) * vq->size;
+	size = req_size;
 	vq->desc = (struct vring_desc *)(uintptr_t)qva_to_vva(dev,
-			addr->desc_user_addr);
-	if (vq->desc == 0) {
+			addr->desc_user_addr, &size);
+	if (vq->desc == 0 || size != req_size) {
 		RTE_LOG(ERR, VHOST_CONFIG,
-			"(%d) failed to find desc ring address.\n",
+			"(%d) failed to map desc ring address.\n",
 			dev->vid);
 		return -1;
 	}
 
 	dev = numa_realloc(dev, addr->index);
+	*pdev = dev;
+
 	vq = dev->virtqueue[addr->index];
 
+	req_size = sizeof(struct vring_avail) + sizeof(uint16_t) * vq->size;
+	size = req_size;
 	vq->avail = (struct vring_avail *)(uintptr_t)qva_to_vva(dev,
-			addr->avail_user_addr);
-	if (vq->avail == 0) {
+			addr->avail_user_addr, &size);
+	if (vq->avail == 0 || size != req_size) {
 		RTE_LOG(ERR, VHOST_CONFIG,
 			"(%d) failed to find avail ring address.\n",
 			dev->vid);
 		return -1;
 	}
 
+	req_size = sizeof(struct vring_used);
+	req_size += sizeof(struct vring_used_elem) * vq->size;
+	size = req_size;
 	vq->used = (struct vring_used *)(uintptr_t)qva_to_vva(dev,
-			addr->used_user_addr);
-	if (vq->used == 0) {
+			addr->used_user_addr, &size);
+	if (vq->used == 0 || size != req_size) {
 		RTE_LOG(ERR, VHOST_CONFIG,
 			"(%d) failed to find used ring address.\n",
 			dev->vid);
@@ -488,6 +506,30 @@
 #define dump_guest_pages(dev)
 #endif
 
+static bool
+vhost_memory_changed(struct VhostUserMemory *new,
+		      struct virtio_memory *old)
+{
+	uint32_t i;
+
+	if (new->nregions != old->nregions)
+		return true;
+
+	for (i = 0; i < new->nregions; ++i) {
+		VhostUserMemoryRegion *new_r = &new->regions[i];
+		struct virtio_memory_region *old_r = &old->regions[i];
+
+		if (new_r->guest_phys_addr != old_r->guest_phys_addr)
+			return true;
+		if (new_r->memory_size != old_r->size)
+			return true;
+		if (new_r->userspace_addr != old_r->guest_user_addr)
+			return true;
+	}
+
+	return false;
+}
+
 static int
 vhost_user_set_mem_table(struct virtio_net *dev, struct VhostUserMsg *pmsg)
 {
@@ -500,6 +542,16 @@
 	uint32_t i;
 	int fd;
 
+	if (dev->mem && !vhost_memory_changed(&memory, dev->mem)) {
+		RTE_LOG(INFO, VHOST_CONFIG,
+			"(%d) memory regions not changed\n", dev->vid);
+
+		for (i = 0; i < memory.nregions; i++)
+			close(pmsg->fds[i]);
+
+		return 0;
+	}
+
 	/* Remove from the data plane. */
 	if (dev->flags & VIRTIO_DEV_RUNNING) {
 		dev->flags &= ~VIRTIO_DEV_RUNNING;
@@ -917,12 +969,47 @@
 	return ret;
 }
 
+static void
+vhost_user_lock_all_queue_pairs(struct virtio_net *dev)
+{
+	unsigned int i = 0;
+	unsigned int vq_num = 0;
+
+	while (vq_num < dev->virt_qp_nb * 2) {
+		struct vhost_virtqueue *vq = dev->virtqueue[i];
+
+		if (vq) {
+			rte_spinlock_lock(&vq->access_lock);
+			vq_num++;
+		}
+		i++;
+	}
+}
+
+static void
+vhost_user_unlock_all_queue_pairs(struct virtio_net *dev)
+{
+	unsigned int i = 0;
+	unsigned int vq_num = 0;
+
+	while (vq_num < dev->virt_qp_nb * 2) {
+		struct vhost_virtqueue *vq = dev->virtqueue[i];
+
+		if (vq) {
+			rte_spinlock_unlock(&vq->access_lock);
+			vq_num++;
+		}
+		i++;
+	}
+}
+
 int
 vhost_user_msg_handler(int vid, int fd)
 {
 	struct virtio_net *dev;
 	struct VhostUserMsg msg;
 	int ret;
+	int unlock_required = 0;
 
 	dev = get_device(vid);
 	if (dev == NULL)
@@ -945,6 +1032,37 @@
 
 	RTE_LOG(INFO, VHOST_CONFIG, "read message %s\n",
 		vhost_message_str[msg.request]);
+
+	/*
+	 * Note: we don't lock all queues on VHOST_USER_GET_VRING_BASE
+	 * and VHOST_USER_RESET_OWNER, since it is sent when virtio stops
+	 * and device is destroyed. destroy_device waits for queues to be
+	 * inactive, so it is safe. Otherwise taking the access_lock
+	 * would cause a dead lock.
+	 */
+	switch (msg.request) {
+	case VHOST_USER_SET_FEATURES:
+	case VHOST_USER_SET_PROTOCOL_FEATURES:
+	case VHOST_USER_SET_OWNER:
+	case VHOST_USER_SET_MEM_TABLE:
+	case VHOST_USER_SET_LOG_BASE:
+	case VHOST_USER_SET_LOG_FD:
+	case VHOST_USER_SET_VRING_NUM:
+	case VHOST_USER_SET_VRING_ADDR:
+	case VHOST_USER_SET_VRING_BASE:
+	case VHOST_USER_SET_VRING_KICK:
+	case VHOST_USER_SET_VRING_CALL:
+	case VHOST_USER_SET_VRING_ERR:
+	case VHOST_USER_SET_VRING_ENABLE:
+	case VHOST_USER_SEND_RARP:
+		vhost_user_lock_all_queue_pairs(dev);
+		unlock_required = 1;
+		break;
+	default:
+		break;
+
+	}
+
 	switch (msg.request) {
 	case VHOST_USER_GET_FEATURES:
 		msg.payload.u64 = vhost_user_get_features();
@@ -991,7 +1109,7 @@
 		vhost_user_set_vring_num(dev, &msg.payload.state);
 		break;
 	case VHOST_USER_SET_VRING_ADDR:
-		vhost_user_set_vring_addr(dev, &msg.payload.addr);
+		vhost_user_set_vring_addr(&dev, &msg.payload.addr);
 		break;
 	case VHOST_USER_SET_VRING_BASE:
 		vhost_user_set_vring_base(dev, &msg.payload.state);
@@ -1034,5 +1152,8 @@
 
 	}
 
+	if (unlock_required)
+		vhost_user_unlock_all_queue_pairs(dev);
+
 	return 0;
 }
diff -Nru dpdk-16.11.4/lib/librte_vhost/virtio_net.c dpdk-16.11.6/lib/librte_vhost/virtio_net.c
--- dpdk-16.11.4/lib/librte_vhost/virtio_net.c	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/lib/librte_vhost/virtio_net.c	2018-04-19 15:01:06.000000000 +0100
@@ -44,6 +44,8 @@
 #include <rte_udp.h>
 #include <rte_sctp.h>
 #include <rte_arp.h>
+#include <rte_spinlock.h>
+#include <rte_malloc.h>
 
 #include "vhost.h"
 
@@ -100,6 +102,44 @@
 	return (is_tx ^ (idx & 1)) == 0 && idx < qp_nb * VIRTIO_QNUM;
 }
 
+static inline struct vring_desc *__attribute__((always_inline))
+alloc_copy_ind_table(struct virtio_net *dev, struct vring_desc *desc)
+{
+	struct vring_desc *idesc;
+	uint64_t src, dst;
+	uint64_t len, remain = desc->len;
+	uint64_t desc_addr = desc->addr;
+
+	idesc = rte_malloc(__func__, desc->len, 0);
+	if (unlikely(!idesc))
+		return 0;
+
+	dst = (uint64_t)(uintptr_t)idesc;
+
+	while (remain) {
+		len = remain;
+		src = gpa_to_vva(dev, desc_addr, &len);
+		if (unlikely(!src || !len)) {
+			rte_free(idesc);
+			return 0;
+		}
+
+		rte_memcpy((void *)(uintptr_t)dst, (void *)(uintptr_t)src, len);
+
+		remain -= len;
+		dst += len;
+		desc_addr += len;
+	}
+
+	return idesc;
+}
+
+static inline void __attribute__((always_inline))
+free_ind_table(struct vring_desc *idesc)
+{
+	rte_free(idesc);
+}
+
 static inline void __attribute__((always_inline))
 do_flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			  uint16_t to, uint16_t from, uint16_t size)
@@ -215,14 +255,17 @@
 	uint32_t desc_avail, desc_offset;
 	uint32_t mbuf_avail, mbuf_offset;
 	uint32_t cpy_len;
+	uint64_t desc_chunck_len;
 	struct vring_desc *desc;
-	uint64_t desc_addr;
+	uint64_t desc_addr, desc_gaddr;
 	struct virtio_net_hdr_mrg_rxbuf virtio_hdr = {{0, 0, 0, 0, 0, 0}, 0};
 	/* A counter to avoid desc dead loop chain */
 	uint16_t nr_desc = 1;
 
 	desc = &descs[desc_idx];
-	desc_addr = gpa_to_vva(dev, desc->addr);
+	desc_chunck_len = desc->len;
+	desc_gaddr = desc->addr;
+	desc_addr = gpa_to_vva(dev, desc_gaddr, &desc_chunck_len);
 	/*
 	 * Checking of 'desc_addr' placed outside of 'unlikely' macro to avoid
 	 * performance issue with some versions of gcc (4.8.4 and 5.3.0) which
@@ -234,12 +277,51 @@
 	rte_prefetch0((void *)(uintptr_t)desc_addr);
 
 	virtio_enqueue_offload(m, &virtio_hdr.hdr);
-	copy_virtio_net_hdr(dev, desc_addr, virtio_hdr);
-	vhost_log_write(dev, desc->addr, dev->vhost_hlen);
-	PRINT_PACKET(dev, (uintptr_t)desc_addr, dev->vhost_hlen, 0);
+	if (likely(desc_chunck_len >= dev->vhost_hlen)) {
+		copy_virtio_net_hdr(dev, desc_addr, virtio_hdr);
+
+		virtio_enqueue_offload(m,
+				(struct virtio_net_hdr *)(uintptr_t)desc_addr);
+		PRINT_PACKET(dev, (uintptr_t)desc_addr, dev->vhost_hlen, 0);
+	} else {
+		uint64_t remain = dev->vhost_hlen;
+		uint64_t len;
+		uint64_t src = (uint64_t)(uintptr_t)&virtio_hdr, dst;
+		uint64_t guest_addr = desc_gaddr;
+
+		while (remain) {
+			len = remain;
+			dst = gpa_to_vva(dev, guest_addr, &len);
+			if (unlikely(!dst || !len))
+				return -1;
+
+			rte_memcpy((void *)(uintptr_t)dst,
+					(void *)(uintptr_t)src, len);
+
+			PRINT_PACKET(dev, (uintptr_t)dst, len, 0);
+			remain -= len;
+			guest_addr += len;
+			dst += len;
+		}
+	}
+
+	vhost_log_write(dev, desc_gaddr, dev->vhost_hlen);
 
-	desc_offset = dev->vhost_hlen;
 	desc_avail  = desc->len - dev->vhost_hlen;
+	if (unlikely(desc_chunck_len < dev->vhost_hlen)) {
+		desc_chunck_len = desc_avail;
+		desc_gaddr += dev->vhost_hlen;
+		desc_addr = gpa_to_vva(dev,
+				desc_gaddr,
+				&desc_chunck_len);
+		if (unlikely(!desc_addr))
+			return -1;
+
+		desc_offset = 0;
+	} else {
+		desc_offset = dev->vhost_hlen;
+		desc_chunck_len -= dev->vhost_hlen;
+	}
 
 	mbuf_avail  = rte_pktmbuf_data_len(m);
 	mbuf_offset = 0;
@@ -262,19 +344,31 @@
 				return -1;
 
 			desc = &descs[desc->next];
-			desc_addr = gpa_to_vva(dev, desc->addr);
+			desc_chunck_len = desc->len;
+			desc_gaddr = desc->addr;
+			desc_addr = gpa_to_vva(dev,
+					desc_gaddr, &desc_chunck_len);
 			if (unlikely(!desc_addr))
 				return -1;
 
 			desc_offset = 0;
 			desc_avail  = desc->len;
+		} else if (unlikely(desc_chunck_len == 0)) {
+			desc_chunck_len = desc_avail;
+			desc_gaddr += desc_offset;
+			desc_addr = gpa_to_vva(dev,
+					desc_gaddr, &desc_chunck_len);
+			if (unlikely(!desc_addr))
+				return -1;
+
+			desc_offset = 0;
 		}
 
 		cpy_len = RTE_MIN(desc_avail, mbuf_avail);
 		rte_memcpy((void *)((uintptr_t)(desc_addr + desc_offset)),
 			rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
 			cpy_len);
-		vhost_log_write(dev, desc->addr + desc_offset, cpy_len);
+		vhost_log_write(dev, desc_gaddr + desc_offset, cpy_len);
 		PRINT_PACKET(dev, (uintptr_t)(desc_addr + desc_offset),
 			     cpy_len, 0);
 
@@ -282,6 +376,7 @@
 		mbuf_offset += cpy_len;
 		desc_avail  -= cpy_len;
 		desc_offset += cpy_len;
+		desc_chunck_len -= cpy_len;
 	}
 
 	return 0;
@@ -304,6 +399,7 @@
 	struct vring_desc *descs;
 	uint16_t used_idx;
 	uint32_t i, sz;
+	uint64_t dlen;
 
 	LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
 	if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->virt_qp_nb))) {
@@ -313,8 +409,11 @@
 	}
 
 	vq = dev->virtqueue[queue_id];
+
+	rte_spinlock_lock(&vq->access_lock);
+
 	if (unlikely(vq->enabled == 0))
-		return 0;
+		goto out_access_unlock;
 
 	avail_idx = *((volatile uint16_t *)&vq->avail->idx);
 	start_idx = vq->last_used_idx;
@@ -322,7 +421,7 @@
 	count = RTE_MIN(count, free_entries);
 	count = RTE_MIN(count, (uint32_t)MAX_PKT_BURST);
 	if (count == 0)
-		return 0;
+		goto out_access_unlock;
 
 	LOG_DEBUG(VHOST_DATA, "(%d) start_idx %d | end_idx %d\n",
 		dev->vid, start_idx, start_idx + count);
@@ -342,17 +441,32 @@
 
 	rte_prefetch0(&vq->desc[desc_indexes[0]]);
 	for (i = 0; i < count; i++) {
+		struct vring_desc *idesc = NULL;
 		uint16_t desc_idx = desc_indexes[i];
 		int err;
 
 		if (vq->desc[desc_idx].flags & VRING_DESC_F_INDIRECT) {
+			dlen = vq->desc[desc_idx].len;
 			descs = (struct vring_desc *)(uintptr_t)gpa_to_vva(dev,
-					vq->desc[desc_idx].addr);
+					vq->desc[desc_idx].addr, &dlen);
 			if (unlikely(!descs)) {
 				count = i;
 				break;
 			}
 
+			if (unlikely(dlen < vq->desc[desc_idx].len)) {
+				/*
+				 * The indirect desc table is not contiguous
+				 * in process VA space, we have to copy it.
+				 */
+				idesc = alloc_copy_ind_table(dev,
+							&vq->desc[desc_idx]);
+				if (unlikely(!idesc))
+					break;
+
+				descs = idesc;
+			}
+
 			desc_idx = 0;
 			sz = vq->desc[desc_idx].len / sizeof(*descs);
 		} else {
@@ -371,6 +485,9 @@
 
 		if (i + 1 < count)
 			rte_prefetch0(&vq->desc[desc_indexes[i+1]]);
+
+		if (unlikely(!!idesc))
+			free_ind_table(idesc);
 	}
 
 	rte_smp_wmb();
@@ -388,6 +505,10 @@
 	if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
 			&& (vq->callfd >= 0))
 		eventfd_write(vq->callfd, (eventfd_t)1);
+
+out_access_unlock:
+	rte_spinlock_unlock(&vq->access_lock);
+
 	return count;
 }
 
@@ -400,22 +521,40 @@
 	uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
 	uint32_t vec_id = *vec_idx;
 	uint32_t len    = 0;
+	uint64_t dlen;
 	struct vring_desc *descs = vq->desc;
+	struct vring_desc *idesc = NULL;
 
 	*desc_chain_head = idx;
 
 	if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) {
+		dlen = vq->desc[idx].len;
 		descs = (struct vring_desc *)(uintptr_t)
-					gpa_to_vva(dev, vq->desc[idx].addr);
+					gpa_to_vva(dev, vq->desc[idx].addr,
+							   &dlen);
 		if (unlikely(!descs))
 			return -1;
 
+		if (unlikely(dlen < vq->desc[idx].len)) {
+			/*
+			 * The indirect desc table is not contiguous
+			 * in process VA space, we have to copy it.
+			 */
+			idesc = alloc_copy_ind_table(dev, &vq->desc[idx]);
+			if (unlikely(!idesc))
+				return -1;
+
+			descs = idesc;
+		}
+
 		idx = 0;
 	}
 
 	while (1) {
-		if (unlikely(vec_id >= BUF_VECTOR_MAX || idx >= vq->size))
+		if (unlikely(vec_id >= BUF_VECTOR_MAX || idx >= vq->size)) {
+			free_ind_table(idesc);
 			return -1;
+		}
 
 		len += descs[idx].len;
 		buf_vec[vec_id].buf_addr = descs[idx].addr;
@@ -432,6 +571,9 @@
 	*desc_chain_len = len;
 	*vec_idx = vec_id;
 
+	if (unlikely(!!idesc))
+		free_ind_table(idesc);
+
 	return 0;
 }
 
@@ -485,8 +627,10 @@
 			    struct buf_vector *buf_vec, uint16_t num_buffers)
 {
 	struct virtio_net_hdr_mrg_rxbuf virtio_hdr = {{0, 0, 0, 0, 0, 0}, 0};
+	struct virtio_net_hdr_mrg_rxbuf *hdr;
 	uint32_t vec_idx = 0;
-	uint64_t desc_addr;
+	uint64_t desc_addr, desc_gaddr;
+	uint64_t desc_chunck_len;
 	uint32_t mbuf_offset, mbuf_avail;
 	uint32_t desc_offset, desc_avail;
 	uint32_t cpy_len;
@@ -496,12 +640,19 @@
 	if (unlikely(m == NULL))
 		return -1;
 
-	desc_addr = gpa_to_vva(dev, buf_vec[vec_idx].buf_addr);
-	if (buf_vec[vec_idx].buf_len < dev->vhost_hlen || !desc_addr)
+	desc_chunck_len = buf_vec[vec_idx].buf_len;
+	desc_gaddr = buf_vec[vec_idx].buf_addr;
+	desc_addr = gpa_to_vva(dev, desc_gaddr, &desc_chunck_len);
+	if (buf_vec[vec_idx].buf_len < dev->vhost_hlen ||
+			!desc_addr)
 		return -1;
 
 	hdr_mbuf = m;
 	hdr_addr = desc_addr;
+	if (unlikely(desc_chunck_len < dev->vhost_hlen))
+		hdr = &virtio_hdr;
+	else
+		hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;
 	hdr_phys_addr = buf_vec[vec_idx].buf_addr;
 	rte_prefetch0((void *)(uintptr_t)hdr_addr);
 
@@ -510,7 +661,21 @@
 		dev->vid, num_buffers);
 
 	desc_avail  = buf_vec[vec_idx].buf_len - dev->vhost_hlen;
-	desc_offset = dev->vhost_hlen;
+	if (unlikely(desc_chunck_len < dev->vhost_hlen)) {
+		desc_chunck_len = desc_avail;
+		desc_gaddr += dev->vhost_hlen;
+		desc_addr = gpa_to_vva(dev,
+				desc_gaddr,
+				&desc_chunck_len);
+		if (unlikely(!desc_addr))
+			return -1;
+
+		desc_offset = 0;
+	} else {
+		desc_offset = dev->vhost_hlen;
+		desc_chunck_len -= dev->vhost_hlen;
+	}
+
 
 	mbuf_avail  = rte_pktmbuf_data_len(m);
 	mbuf_offset = 0;
@@ -518,7 +683,10 @@
 		/* done with current desc buf, get the next one */
 		if (desc_avail == 0) {
 			vec_idx++;
-			desc_addr = gpa_to_vva(dev, buf_vec[vec_idx].buf_addr);
+			desc_gaddr = buf_vec[vec_idx].buf_addr;
+			desc_chunck_len = buf_vec[vec_idx].buf_len;
+			desc_addr = gpa_to_vva(dev, desc_gaddr,
+					&desc_chunck_len);
 			if (unlikely(!desc_addr))
 				return -1;
 
@@ -526,6 +694,16 @@
 			rte_prefetch0((void *)(uintptr_t)desc_addr);
 			desc_offset = 0;
 			desc_avail  = buf_vec[vec_idx].buf_len;
+		} else if (unlikely(desc_chunck_len == 0)) {
+			desc_chunck_len = desc_avail;
+			desc_gaddr += desc_offset;
+			desc_addr = gpa_to_vva(dev,
+					desc_gaddr,
+					&desc_chunck_len);
+			if (unlikely(!desc_addr))
+				return -1;
+
+			desc_offset = 0;
 		}
 
 		/* done with current mbuf, get the next one */
@@ -538,7 +716,33 @@
 
 		if (hdr_addr) {
 			virtio_enqueue_offload(hdr_mbuf, &virtio_hdr.hdr);
-			copy_virtio_net_hdr(dev, hdr_addr, virtio_hdr);
+			if (likely(hdr != &virtio_hdr)) {
+				copy_virtio_net_hdr(dev, hdr_addr, virtio_hdr);
+			} else {
+				uint64_t len;
+				uint64_t remain = dev->vhost_hlen;
+				uint64_t src = (uint64_t)(uintptr_t)&virtio_hdr;
+				uint64_t dst;
+				uint64_t guest_addr = hdr_phys_addr;
+
+				while (remain) {
+					len = remain;
+					dst = gpa_to_vva(dev, guest_addr, &len);
+					if (unlikely(!dst || !len))
+						return -1;
+
+					rte_memcpy((void *)(uintptr_t)dst,
+							(void *)(uintptr_t)src,
+							len);
+
+					PRINT_PACKET(dev, (uintptr_t)dst,
+							len, 0);
+
+					remain -= len;
+					guest_addr += len;
+					dst += len;
+				}
+			}
 			vhost_log_write(dev, hdr_phys_addr, dev->vhost_hlen);
 			PRINT_PACKET(dev, (uintptr_t)hdr_addr,
 				     dev->vhost_hlen, 0);
@@ -546,12 +750,11 @@
 			hdr_addr = 0;
 		}
 
-		cpy_len = RTE_MIN(desc_avail, mbuf_avail);
+		cpy_len = RTE_MIN(desc_chunck_len, mbuf_avail);
 		rte_memcpy((void *)((uintptr_t)(desc_addr + desc_offset)),
 			rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
 			cpy_len);
-		vhost_log_write(dev, buf_vec[vec_idx].buf_addr + desc_offset,
-			cpy_len);
+		vhost_log_write(dev, desc_gaddr + desc_offset, cpy_len);
 		PRINT_PACKET(dev, (uintptr_t)(desc_addr + desc_offset),
 			cpy_len, 0);
 
@@ -559,6 +762,7 @@
 		mbuf_offset += cpy_len;
 		desc_avail  -= cpy_len;
 		desc_offset += cpy_len;
+		desc_chunck_len -= cpy_len;
 	}
 
 	return 0;
@@ -582,12 +786,15 @@
 	}
 
 	vq = dev->virtqueue[queue_id];
+
+	rte_spinlock_lock(&vq->access_lock);
+
 	if (unlikely(vq->enabled == 0))
-		return 0;
+		goto out_access_unlock;
 
 	count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
 	if (count == 0)
-		return 0;
+		goto out_access_unlock;
 
 	rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
 
@@ -631,6 +838,9 @@
 			eventfd_write(vq->callfd, (eventfd_t)1);
 	}
 
+out_access_unlock:
+	rte_spinlock_unlock(&vq->access_lock);
+
 	return pkt_idx;
 }
 
@@ -809,11 +1019,13 @@
 		  struct rte_mempool *mbuf_pool)
 {
 	struct vring_desc *desc;
-	uint64_t desc_addr;
+	uint64_t desc_addr, desc_gaddr;
 	uint32_t desc_avail, desc_offset;
 	uint32_t mbuf_avail, mbuf_offset;
 	uint32_t cpy_len;
+	uint64_t desc_chunck_len;
 	struct rte_mbuf *cur = m, *prev = m;
+	struct virtio_net_hdr tmp_hdr;
 	struct virtio_net_hdr *hdr = NULL;
 	/* A counter to avoid desc dead loop chain */
 	uint32_t nr_desc = 1;
@@ -823,13 +1035,43 @@
 			(desc->flags & VRING_DESC_F_INDIRECT))
 		return -1;
 
-	desc_addr = gpa_to_vva(dev, desc->addr);
+	desc_chunck_len = desc->len;
+	desc_gaddr = desc->addr;
+	desc_addr = gpa_to_vva(dev, desc_gaddr, &desc_chunck_len);
 	if (unlikely(!desc_addr))
 		return -1;
 
 	if (virtio_net_with_host_offload(dev)) {
-		hdr = (struct virtio_net_hdr *)((uintptr_t)desc_addr);
-		rte_prefetch0(hdr);
+		if (unlikely(desc_chunck_len < sizeof(struct virtio_net_hdr))) {
+			uint64_t len = desc_chunck_len;
+			uint64_t remain = sizeof(struct virtio_net_hdr);
+			uint64_t src = desc_addr;
+			uint64_t dst = (uint64_t)(uintptr_t)&tmp_hdr;
+			uint64_t guest_addr = desc_gaddr;
+
+			/*
+			 * No luck, the virtio-net header doesn't fit
+			 * in a contiguous virtual area.
+			 */
+			while (remain) {
+				len = remain;
+				src = gpa_to_vva(dev, guest_addr, &len);
+				if (unlikely(!src || !len))
+					return -1;
+
+				rte_memcpy((void *)(uintptr_t)dst,
+						   (void *)(uintptr_t)src, len);
+
+				guest_addr += len;
+				remain -= len;
+				dst += len;
+			}
+
+			hdr = &tmp_hdr;
+		} else {
+			hdr = (struct virtio_net_hdr *)((uintptr_t)desc_addr);
+			rte_prefetch0(hdr);
+		}
 	}
 
 	/*
@@ -843,7 +1085,9 @@
 		if (unlikely(desc->flags & VRING_DESC_F_INDIRECT))
 			return -1;
 
-		desc_addr = gpa_to_vva(dev, desc->addr);
+		desc_chunck_len = desc->len;
+		desc_gaddr = desc->addr;
+		desc_addr = gpa_to_vva(dev, desc_gaddr, &desc_chunck_len);
 		if (unlikely(!desc_addr))
 			return -1;
 
@@ -852,19 +1096,34 @@
 		nr_desc    += 1;
 	} else {
 		desc_avail  = desc->len - dev->vhost_hlen;
-		desc_offset = dev->vhost_hlen;
+
+		if (unlikely(desc_chunck_len < dev->vhost_hlen)) {
+			desc_chunck_len = desc_avail;
+			desc_gaddr += dev->vhost_hlen;
+			desc_addr = gpa_to_vva(dev,
+					desc_gaddr,
+					&desc_chunck_len);
+			if (unlikely(!desc_addr))
+				return -1;
+
+			desc_offset = 0;
+		} else {
+			desc_offset = dev->vhost_hlen;
+			desc_chunck_len -= dev->vhost_hlen;
+		}
 	}
 
 	rte_prefetch0((void *)(uintptr_t)(desc_addr + desc_offset));
 
-	PRINT_PACKET(dev, (uintptr_t)(desc_addr + desc_offset), desc_avail, 0);
+	PRINT_PACKET(dev, (uintptr_t)(desc_addr + desc_offset),
+			desc_chunck_len, 0);
 
 	mbuf_offset = 0;
 	mbuf_avail  = m->buf_len - RTE_PKTMBUF_HEADROOM;
 	while (1) {
 		uint64_t hpa;
 
-		cpy_len = RTE_MIN(desc_avail, mbuf_avail);
+		cpy_len = RTE_MIN(desc_chunck_len, mbuf_avail);
 
 		/*
 		 * A desc buf might across two host physical pages that are
@@ -872,10 +1131,11 @@
 		 * will be copied even though zero copy is enabled.
 		 */
 		if (unlikely(dev->dequeue_zero_copy && (hpa = gpa_to_hpa(dev,
-					desc->addr + desc_offset, cpy_len)))) {
+					desc_gaddr + desc_offset, cpy_len)))) {
 			cur->data_len = cpy_len;
 			cur->data_off = 0;
-			cur->buf_addr = (void *)(uintptr_t)desc_addr;
+			cur->buf_addr = (void *)(uintptr_t)(desc_gaddr
+					+ desc_offset);
 			cur->buf_physaddr = hpa;
 
 			/*
@@ -893,6 +1153,7 @@
 		mbuf_avail  -= cpy_len;
 		mbuf_offset += cpy_len;
 		desc_avail  -= cpy_len;
+		desc_chunck_len -= cpy_len;
 		desc_offset += cpy_len;
 
 		/* This desc reaches to its end, get the next one */
@@ -907,7 +1168,10 @@
 			if (unlikely(desc->flags & VRING_DESC_F_INDIRECT))
 				return -1;
 
-			desc_addr = gpa_to_vva(dev, desc->addr);
+			desc_chunck_len = desc->len;
+			desc_gaddr = desc->addr;
+			desc_addr = gpa_to_vva(dev, desc_gaddr,
+					&desc_chunck_len);
 			if (unlikely(!desc_addr))
 				return -1;
 
@@ -916,7 +1180,21 @@
 			desc_offset = 0;
 			desc_avail  = desc->len;
 
-			PRINT_PACKET(dev, (uintptr_t)desc_addr, desc->len, 0);
+			PRINT_PACKET(dev, (uintptr_t)desc_addr,
+					desc_chunck_len, 0);
+		} else if (unlikely(desc_chunck_len == 0)) {
+			desc_chunck_len = desc_avail;
+			desc_gaddr += desc_offset;
+			desc_addr = gpa_to_vva(dev,
+					desc_gaddr,
+					&desc_chunck_len);
+			if (unlikely(!desc_addr))
+				return -1;
+
+			desc_offset = 0;
+
+			PRINT_PACKET(dev, (uintptr_t)desc_addr,
+					desc_chunck_len, 0);
 		}
 
 		/*
@@ -1027,6 +1305,22 @@
 	return true;
 }
 
+static inline void __attribute__((always_inline))
+restore_mbuf(struct rte_mbuf *m)
+{
+	uint32_t mbuf_size, priv_size;
+
+	while (m) {
+		priv_size = rte_pktmbuf_priv_size(m->pool);
+		mbuf_size = sizeof(struct rte_mbuf) + priv_size;
+		/* start of buffer is after mbuf structure and priv data */
+
+		m->buf_addr = (char *)m + mbuf_size;
+		m->buf_physaddr = rte_mempool_virt2phy(NULL, m) + mbuf_size;
+		m = m->next;
+	}
+}
+
 uint16_t
 rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
@@ -1051,9 +1345,13 @@
 	}
 
 	vq = dev->virtqueue[queue_id];
-	if (unlikely(vq->enabled == 0))
+
+	if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0))
 		return 0;
 
+	if (unlikely(vq->enabled == 0))
+		goto out_access_unlock;
+
 	if (unlikely(dev->dequeue_zero_copy)) {
 		struct zcopy_mbuf *zmbuf, *next;
 		int nr_updated = 0;
@@ -1069,6 +1367,7 @@
 				nr_updated += 1;
 
 				TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
+				restore_mbuf(zmbuf->mbuf);
 				rte_pktmbuf_free(zmbuf->mbuf);
 				put_zmbuf(zmbuf);
 				vq->nr_zmbuf -= 1;
@@ -1102,7 +1401,7 @@
 		if (rarp_mbuf == NULL) {
 			RTE_LOG(ERR, VHOST_DATA,
 				"Failed to allocate memory for mbuf.\n");
-			return 0;
+			goto out_access_unlock;
 		}
 
 		if (make_rarp_packet(rarp_mbuf, &dev->mac)) {
@@ -1116,7 +1415,7 @@
 	free_entries = *((volatile uint16_t *)&vq->avail->idx) -
 			vq->last_avail_idx;
 	if (free_entries == 0)
-		goto out;
+		goto out_access_unlock;
 
 	LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
 
@@ -1144,19 +1443,35 @@
 	/* Prefetch descriptor index. */
 	rte_prefetch0(&vq->desc[desc_indexes[0]]);
 	for (i = 0; i < count; i++) {
-		struct vring_desc *desc;
+		struct vring_desc *desc, *idesc = NULL;
 		uint16_t sz, idx;
+		uint64_t dlen;
 		int err;
 
 		if (likely(i + 1 < count))
 			rte_prefetch0(&vq->desc[desc_indexes[i + 1]]);
 
 		if (vq->desc[desc_indexes[i]].flags & VRING_DESC_F_INDIRECT) {
+			dlen = vq->desc[desc_indexes[i]].len;
 			desc = (struct vring_desc *)(uintptr_t)gpa_to_vva(dev,
-					vq->desc[desc_indexes[i]].addr);
+					vq->desc[desc_indexes[i]].addr,
+					&dlen);
 			if (unlikely(!desc))
 				break;
 
+			if (unlikely(dlen < vq->desc[desc_indexes[i]].len)) {
+				/*
+				 * The indirect desc table is not contiguous
+				 * in process VA space, we have to copy it.
+				 */
+				idesc = alloc_copy_ind_table(dev,
+						&vq->desc[desc_indexes[i]]);
+				if (unlikely(!idesc))
+					break;
+
+				desc = idesc;
+			}
+
 			rte_prefetch0(desc);
 			sz = vq->desc[desc_indexes[i]].len / sizeof(*desc);
 			idx = 0;
@@ -1170,12 +1485,14 @@
 		if (unlikely(pkts[i] == NULL)) {
 			RTE_LOG(ERR, VHOST_DATA,
 				"Failed to allocate memory for mbuf.\n");
+			free_ind_table(idesc);
 			break;
 		}
 
 		err = copy_desc_to_mbuf(dev, desc, sz, pkts[i], idx, mbuf_pool);
 		if (unlikely(err)) {
 			rte_pktmbuf_free(pkts[i]);
+			free_ind_table(idesc);
 			break;
 		}
 
@@ -1185,6 +1502,7 @@
 			zmbuf = get_zmbuf(vq);
 			if (!zmbuf) {
 				rte_pktmbuf_free(pkts[i]);
+				free_ind_table(idesc);
 				break;
 			}
 			zmbuf->mbuf = pkts[i];
@@ -1201,6 +1519,9 @@
 			vq->nr_zmbuf += 1;
 			TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
 		}
+
+		if (unlikely(!!idesc))
+			free_ind_table(idesc);
 	}
 	vq->last_avail_idx += i;
 
@@ -1209,7 +1530,9 @@
 		update_used_idx(dev, vq, i);
 	}
 
-out:
+out_access_unlock:
+	rte_spinlock_unlock(&vq->access_lock);
+
 	if (unlikely(rarp_mbuf != NULL)) {
 		/*
 		 * Inject it to the head of "pkts" array, so that switch's mac
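
Taken together, the locking added in vhost_user.c and virtio_net.c above follows one rule: control-path messages that may change ring addresses or the memory table take every virtqueue's access_lock (vhost_user_lock_all_queue_pairs), the enqueue path takes only its own queue's lock, and the dequeue path merely trylocks so the polling thread is never stalled. A rough sketch of the dequeue-side idiom, with an illustrative function name (dequeue_guarded):

static uint16_t
dequeue_guarded(struct vhost_virtqueue *vq)
{
	uint16_t nb_rx = 0;

	/* a VHOST_USER_SET_* message may be holding the lock; skip this burst */
	if (rte_spinlock_trylock(&vq->access_lock) == 0)
		return 0;

	if (likely(vq->enabled)) {
		/* ... copy descriptors into mbufs, counting them in nb_rx ... */
	}

	rte_spinlock_unlock(&vq->access_lock);

	return nb_rx;
}
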
diff -Nru dpdk-16.11.4/MAINTAINERS dpdk-16.11.6/MAINTAINERS
--- dpdk-16.11.4/MAINTAINERS	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/MAINTAINERS	2018-04-19 15:01:06.000000000 +0100
@@ -166,6 +166,7 @@
 M: Chao Zhu <chaozhu@linux.vnet.ibm.com>
 F: lib/librte_eal/common/arch/ppc_64/
 F: lib/librte_eal/common/include/arch/ppc_64/
+F: drivers/net/i40e/i40e_rxtx_vec_altivec.c
 
 Intel x86
 M: Bruce Richardson <bruce.richardson@intel.com>
diff -Nru dpdk-16.11.4/mk/internal/rte.extvars.mk dpdk-16.11.6/mk/internal/rte.extvars.mk
--- dpdk-16.11.4/mk/internal/rte.extvars.mk	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/mk/internal/rte.extvars.mk	2018-04-19 15:01:06.000000000 +0100
@@ -48,7 +48,7 @@
 RTE_EXTMK := $(abspath $(M))
 endif
 endif
-RTE_EXTMK ?= $(RTE_SRCDIR)/Makefile
+RTE_EXTMK ?= $(RTE_SRCDIR)/$(notdir $(firstword $(MAKEFILE_LIST)))
 export RTE_EXTMK
 
 # RTE_SDK_BIN must point to .config, include/ and lib/.
diff -Nru dpdk-16.11.4/pkg/dpdk.spec dpdk-16.11.6/pkg/dpdk.spec
--- dpdk-16.11.4/pkg/dpdk.spec	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/pkg/dpdk.spec	2018-04-19 15:01:06.000000000 +0100
@@ -30,7 +30,7 @@
 # OF THE POSSIBILITY OF SUCH DAMAGE.
 
 Name: dpdk
-Version: 16.11.4
+Version: 16.11.6
 Release: 1
 Packager: packaging@6wind.com
 URL: http://dpdk.org
diff -Nru dpdk-16.11.4/tools/dpdk-devbind.py dpdk-16.11.6/tools/dpdk-devbind.py
--- dpdk-16.11.4/tools/dpdk-devbind.py	2017-12-08 16:21:04.000000000 +0000
+++ dpdk-16.11.6/tools/dpdk-devbind.py	2018-04-19 15:01:06.000000000 +0100
@@ -126,40 +126,6 @@
                             stderr=stderr).communicate()[0]
 
 
-def find_module(mod):
-    '''find the .ko file for kernel module named mod.
-    Searches the $RTE_SDK/$RTE_TARGET directory, the kernel
-    modules directory and finally under the parent directory of
-    the script '''
-    # check $RTE_SDK/$RTE_TARGET directory
-    if 'RTE_SDK' in os.environ and 'RTE_TARGET' in os.environ:
-        path = "%s/%s/kmod/%s.ko" % (os.environ['RTE_SDK'],
-                                     os.environ['RTE_TARGET'], mod)
-        if exists(path):
-            return path
-
-    # check using depmod
-    try:
-        depmod_out = check_output(["modinfo", "-n", mod],
-                                  stderr=subprocess.STDOUT).lower()
-        if "error" not in depmod_out:
-            path = depmod_out.strip()
-            if exists(path):
-                return path
-    except:  # if modinfo can't find module, it fails, so continue
-        pass
-
-    # check for a copy based off current path
-    tools_dir = dirname(abspath(sys.argv[0]))
-    if (tools_dir.endswith("tools")):
-        base_dir = dirname(tools_dir)
-        find_out = check_output(["find", base_dir, "-name", mod + ".ko"])
-        if len(find_out) > 0:  # something matched
-            path = find_out.splitlines()[0]
-            if exists(path):
-                return path
-
-
 def check_modules():
     '''Checks that igb_uio is loaded'''
     global dpdk_drivers

Attachment: signature.asc
Description: This is a digitally signed message part


--- End Message ---
--- Begin Message ---
Version: 9.5

Hi,

The update referenced by each of these bugs was included in this
morning's stretch point release.

Regards,

Adam

--- End Message ---
