[Date Prev][Date Next] [Thread Prev][Thread Next] [Date Index] [Thread Index]

Bug#1032441: marked as done (unblock: ovn/23.03.0-1)



Your message dated Tue, 07 Mar 2023 10:06:28 +0000
with message-id <E1pZUDE-003OXR-RM@respighi.debian.org>
and subject line unblock ovn
has caused the Debian Bug report #1032441,
regarding unblock: ovn/23.03.0-1
to be marked as done.

This means that you claim that the problem has been dealt with.
If this is not the case it is now your responsibility to reopen the
Bug report if necessary, and/or fix the problem forthwith.

(NB: If you are a system administrator and have no idea what this
message is talking about, this may indicate a serious mail system
misconfiguration somewhere. Please contact owner@bugs.debian.org
immediately.)


-- 
1032441: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1032441
Debian Bug Tracking System
Contact owner@bugs.debian.org with problems
--- Begin Message ---
Package: release.debian.org
Severity: normal
User: release.debian.org@packages.debian.org
Usertags: unblock
X-Debbugs-CC: pkg-dpdk-devel@lists.alioth.debian.org zigo@debian.org

Please unblock package ovn

As discussed and agreed in:
https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1024322

We have uploaded dpdk/ovs/ovn back in December, with git snapshots, to
ensure the ABI transition could be completed on time for the transition
freeze. This has worked well, and the only thing left to do is to
update src:ovn from the git snapshot to the final release, which was
tagged a couple of days ago. Only bug fix changes were merged since the
git snapshot that is currently in bookworm.

I have uploaded to unstable today, so with the 10-day migration delay
it will not migrate in time for the next freeze; hence I am filing this
unblock request.

Debdiff attached.

Thank you!

-- 
Kind regards,
Luca Boccassi
diff -Nru ovn-23.03.0~git20230221.038cfb1/.ci/ovn-kubernetes/Dockerfile ovn-23.03.0/.ci/ovn-kubernetes/Dockerfile
--- ovn-23.03.0~git20230221.038cfb1/.ci/ovn-kubernetes/Dockerfile	2023-02-17 21:40:31.000000000 +0000
+++ ovn-23.03.0/.ci/ovn-kubernetes/Dockerfile	2023-03-03 18:37:48.000000000 +0000
@@ -1,5 +1,5 @@
 ARG OVNKUBE_COMMIT=master
-ARG LIBOVSDB_COMMIT=8081fe24e48f
+ARG LIBOVSDB_COMMIT=a6a173993830
 
 FROM fedora:37 AS ovnbuilder
 
diff -Nru ovn-23.03.0~git20230221.038cfb1/controller/chassis.c ovn-23.03.0/controller/chassis.c
--- ovn-23.03.0~git20230221.038cfb1/controller/chassis.c	2023-02-17 21:40:31.000000000 +0000
+++ ovn-23.03.0/controller/chassis.c	2023-03-03 18:37:48.000000000 +0000
@@ -99,9 +99,9 @@
 get_hostname(const struct smap *ext_ids, const char *chassis_id)
 {
     const char *hostname = get_chassis_external_id_value(ext_ids, chassis_id,
-                                                         "hostname", "");
+                                                         "hostname", NULL);
 
-    if (strlen(hostname) == 0) {
+    if (!hostname) {
         static char hostname_[HOST_NAME_MAX + 1];
 
         if (gethostname(hostname_, sizeof(hostname_))) {
diff -Nru ovn-23.03.0~git20230221.038cfb1/controller/encaps.c ovn-23.03.0/controller/encaps.c
--- ovn-23.03.0~git20230221.038cfb1/controller/encaps.c	2023-02-17 21:40:31.000000000 +0000
+++ ovn-23.03.0/controller/encaps.c	2023-03-03 18:37:48.000000000 +0000
@@ -77,7 +77,7 @@
     for (int i = 0; i < UINT16_MAX; i++) {
         const char *idx = get_chassis_idx(tc->ovs_table);
         char *port_name = xasprintf(
-            "ovn%s-%.*s-%x", idx, strlen(idx) ? 5 : 6, chassis_id, i);
+            "ovn%s-%.*s-%x", idx, idx[0] ? 5 : 6, chassis_id, i);
 
         if (!sset_contains(&tc->port_names, port_name)) {
             return port_name;
diff -Nru ovn-23.03.0~git20230221.038cfb1/controller/lflow.c ovn-23.03.0/controller/lflow.c
--- ovn-23.03.0~git20230221.038cfb1/controller/lflow.c	2023-02-17 21:40:31.000000000 +0000
+++ ovn-23.03.0/controller/lflow.c	2023-03-03 18:37:48.000000000 +0000
@@ -18,7 +18,6 @@
 #include "lflow.h"
 #include "coverage.h"
 #include "ha-chassis.h"
-#include "lib/id-pool.h"
 #include "lflow-cache.h"
 #include "local_data.h"
 #include "lport.h"
@@ -99,9 +98,9 @@
 
 static void
 consider_lb_hairpin_flows(const struct ovn_controller_lb *lb,
+                          const struct hmap *local_datapaths,
                           bool use_ct_mark,
-                          struct ovn_desired_flow_table *flow_table,
-                          struct simap *ids);
+                          struct ovn_desired_flow_table *flow_table);
 
 static void add_port_sec_flows(const struct shash *binding_lports,
                                const struct sbrec_chassis *,
@@ -1731,114 +1730,38 @@
 static void
 add_lb_ct_snat_hairpin_for_dp(const struct ovn_controller_lb *lb,
                               const struct sbrec_datapath_binding *datapath,
+                              const struct hmap *local_datapaths,
                               struct match *dp_match,
                               struct ofpbuf *dp_acts,
                               struct ovn_desired_flow_table *flow_table)
 {
-    match_set_metadata(dp_match, htonll(datapath->tunnel_key));
-    ofctrl_add_or_append_flow(flow_table, OFTABLE_CT_SNAT_HAIRPIN, 200,
-                              lb->slb->header_.uuid.parts[0],
-                              dp_match, dp_acts, &lb->slb->header_.uuid,
-                              NX_CTLR_NO_METER, NULL);
-}
-
-static void
-add_lb_ct_snat_hairpin_dp_flows(const struct ovn_controller_lb *lb,
-                                uint32_t id,
-                                struct ovn_desired_flow_table *flow_table)
-{
-    /* If "hairpin_snat_ip" is not specified on this LB, we do not need
-       to add these flows because no conjunctive flows have been added
-       by add_lb_ct_snat_hairpin_vip_flow() for this LB. */
-    if (!lb->hairpin_snat_ips.n_ipv4_addrs &&
-        !lb->hairpin_snat_ips.n_ipv6_addrs) {
-        return;
-    }
-
-    uint64_t stub[1024 / 8];
-    struct ofpbuf dp_acts = OFPBUF_STUB_INITIALIZER(stub);
-    struct ofpact_conjunction *conj;
-
-    conj = ofpact_put_CONJUNCTION(&dp_acts);
-    conj->id = id;
-    conj->n_clauses = 2;
-    conj->clause = 0;
-
-    struct match dp_match = MATCH_CATCHALL_INITIALIZER;
-
-    for (size_t i = 0; i < lb->slb->n_datapaths; i++) {
-        add_lb_ct_snat_hairpin_for_dp(lb, lb->slb->datapaths[i],
-                                      &dp_match, &dp_acts, flow_table);
-    }
-    if (lb->slb->datapath_group) {
-        for (size_t i = 0; i < lb->slb->datapath_group->n_datapaths; i++) {
-            add_lb_ct_snat_hairpin_for_dp(
-                lb, lb->slb->datapath_group->datapaths[i],
-                &dp_match, &dp_acts, flow_table);
+    if (datapath) {
+        if (!get_local_datapath(local_datapaths, datapath->tunnel_key)) {
+            return;
         }
+        match_set_metadata(dp_match, htonll(datapath->tunnel_key));
     }
 
-    ofpbuf_uninit(&dp_acts);
-
-    struct ofpbuf snat_acts = OFPBUF_STUB_INITIALIZER(stub);
-
-    struct ofpact_conntrack *ct = ofpact_put_CT(&snat_acts);
-    ct->recirc_table = NX_CT_RECIRC_NONE;
-    ct->zone_src.field = mf_from_id(MFF_LOG_SNAT_ZONE);
-    ct->zone_src.ofs = 0;
-    ct->zone_src.n_bits = 16;
-    ct->flags = NX_CT_F_COMMIT;
-    ct->alg = 0;
-
-    size_t nat_offset;
-    nat_offset = snat_acts.size;
-    ofpbuf_pull(&snat_acts, nat_offset);
-
-    struct ofpact_nat *nat = ofpact_put_NAT(&snat_acts);
-    nat->flags = NX_NAT_F_SRC;
-
-    snat_acts.header = ofpbuf_push_uninit(&snat_acts, nat_offset);
-    ofpact_finish(&snat_acts, &ct->ofpact);
-
-    struct match snat_match = MATCH_CATCHALL_INITIALIZER;
-
-    match_set_conj_id(&snat_match, id);
-
-    if (lb->hairpin_snat_ips.n_ipv4_addrs) {
-        nat->range_af = AF_INET;
-        nat->range.addr.ipv4.min = lb->hairpin_snat_ips.ipv4_addrs[0].addr;
-        match_set_dl_type(&snat_match, htons(ETH_TYPE_IP));
-
-        ofctrl_add_flow(flow_table, OFTABLE_CT_SNAT_HAIRPIN, 200,
-                    lb->slb->header_.uuid.parts[0],
-                    &snat_match, &snat_acts, &lb->slb->header_.uuid);
-    }
-
-    if (lb->hairpin_snat_ips.n_ipv6_addrs) {
-        nat->range_af = AF_INET6;
-        nat->range.addr.ipv6.min = lb->hairpin_snat_ips.ipv6_addrs[0].addr;
-        match_set_dl_type(&snat_match, htons(ETH_TYPE_IPV6));
-
-        ofctrl_add_flow(flow_table, OFTABLE_CT_SNAT_HAIRPIN, 200,
-                    lb->slb->header_.uuid.parts[0],
-                    &snat_match, &snat_acts, &lb->slb->header_.uuid);
-    }
-
-    ofpbuf_uninit(&snat_acts);
+    /* A flow added for the "hairpin_snat_ip" case will have an extra
+     * datapath match, but it will also match on the less restrictive
+     * general case.  Therefore, we set the priority in the
+     * "hairpin_snat_ip" case to be higher than the general case. */
+    ofctrl_add_or_append_flow(flow_table, OFTABLE_CT_SNAT_HAIRPIN,
+                              datapath ? 200 : 100,
+                              lb->slb->header_.uuid.parts[0],
+                              dp_match, dp_acts, &lb->slb->header_.uuid,
+                              NX_CTLR_NO_METER, NULL);
 }
 
-
-/* Add a ct_snat flow for each VIP of the LB. If this LB does not use
+/* Add a ct_snat flow for each VIP of the LB.  If this LB does not use
  * "hairpin_snat_ip", we can SNAT using the VIP.
  *
- * If this LB uses "hairpin_snat_ip", we add a flow to one dimension of a
- * conjunctive flow 'id'. The other dimension consists of the datapaths
- * that this LB belongs to. These flows (and the actual SNAT flow) get added
- * by add_lb_ct_snat_hairpin_dp_flows(). */
+ * If this LB uses "hairpin_snat_ip", we can SNAT using that address, but
+ * we have to add a separate flow per datapath. */
 static void
 add_lb_ct_snat_hairpin_vip_flow(const struct ovn_controller_lb *lb,
-                                uint32_t id,
-                                struct ovn_lb_vip *lb_vip,
+                                const struct ovn_lb_vip *lb_vip,
+                                const struct hmap *local_datapaths,
                                 struct ovn_desired_flow_table *flow_table)
 {
     uint64_t stub[1024 / 8];
@@ -1851,51 +1774,33 @@
         address_family = AF_INET6;
     }
 
-    bool use_hairpin_snat_ip = false;
-    uint16_t priority = 100;
-    if ((address_family == AF_INET && lb->hairpin_snat_ips.n_ipv4_addrs) ||
-        (address_family == AF_INET6 && lb->hairpin_snat_ips.n_ipv6_addrs)) {
-        use_hairpin_snat_ip = true;
+    struct ofpact_conntrack *ct = ofpact_put_CT(&ofpacts);
+    ct->recirc_table = NX_CT_RECIRC_NONE;
+    ct->zone_src.field = mf_from_id(MFF_LOG_SNAT_ZONE);
+    ct->zone_src.ofs = 0;
+    ct->zone_src.n_bits = 16;
+    ct->flags = NX_CT_F_COMMIT;
+    ct->alg = 0;
 
-        /* A flow added for the "hairpin_snat_ip" case will also match on the
-           less restrictive general case. This can be seen as the match in both
-           cases is the same (the second dimension of the conjunction makes it
-           more restrictive). Therefore, we set the priority in the
-           "hairpin_snat_ip" case to be higher than the general case. */
-        priority = 200;
-    }
+    size_t nat_offset;
+    nat_offset = ofpacts.size;
+    ofpbuf_pull(&ofpacts, nat_offset);
 
-    if (use_hairpin_snat_ip) {
-        struct ofpact_conjunction *conj;
-        conj = ofpact_put_CONJUNCTION(&ofpacts);
-        conj->id = id;
-        conj->n_clauses = 2;
-        conj->clause = 1;
-    } else {
-        struct ofpact_conntrack *ct = ofpact_put_CT(&ofpacts);
-        ct->recirc_table = NX_CT_RECIRC_NONE;
-        ct->zone_src.field = mf_from_id(MFF_LOG_SNAT_ZONE);
-        ct->zone_src.ofs = 0;
-        ct->zone_src.n_bits = 16;
-        ct->flags = NX_CT_F_COMMIT;
-        ct->alg = 0;
-
-        size_t nat_offset;
-        nat_offset = ofpacts.size;
-        ofpbuf_pull(&ofpacts, nat_offset);
-
-        struct ofpact_nat *nat = ofpact_put_NAT(&ofpacts);
-        nat->flags = NX_NAT_F_SRC;
-        nat->range_af = address_family;
+    struct ofpact_nat *nat = ofpact_put_NAT(&ofpacts);
+    nat->flags = NX_NAT_F_SRC;
+    nat->range_af = address_family;
 
-        if (nat->range_af == AF_INET) {
-            nat->range.addr.ipv4.min = in6_addr_get_mapped_ipv4(&lb_vip->vip);
-        } else {
-            nat->range.addr.ipv6.min = lb_vip->vip;
-        }
-        ofpacts.header = ofpbuf_push_uninit(&ofpacts, nat_offset);
-        ofpact_finish(&ofpacts, &ct->ofpact);
+    if (nat->range_af == AF_INET) {
+        nat->range.addr.ipv4.min = lb->hairpin_snat_ips.n_ipv4_addrs
+                                   ? lb->hairpin_snat_ips.ipv4_addrs[0].addr
+                                   : in6_addr_get_mapped_ipv4(&lb_vip->vip);
+    } else {
+        nat->range.addr.ipv6.min = lb->hairpin_snat_ips.n_ipv6_addrs
+                                   ? lb->hairpin_snat_ips.ipv6_addrs[0].addr
+                                   : lb_vip->vip;
     }
+    ofpacts.header = ofpbuf_push_uninit(&ofpacts, nat_offset);
+    ofpact_finish(&ofpacts, &ct->ofpact);
 
     struct match match = MATCH_CATCHALL_INITIALIZER;
 
@@ -1942,15 +1847,31 @@
         }
     }
 
-    /* We need to "add_or_append" flows because this match may form part
-     * of flows if the same "hairpin_snat_ip" address is present on mutiple
-     * LBs */
-    ofctrl_add_or_append_flow(flow_table, OFTABLE_CT_SNAT_HAIRPIN, priority,
-                              lb->slb->header_.uuid.parts[0],
-                              &match, &ofpacts, &lb->slb->header_.uuid,
-                              NX_CTLR_NO_METER, NULL);
-    ofpbuf_uninit(&ofpacts);
+    bool use_hairpin_snat_ip = false;
+    if ((address_family == AF_INET && lb->hairpin_snat_ips.n_ipv4_addrs) ||
+        (address_family == AF_INET6 && lb->hairpin_snat_ips.n_ipv6_addrs)) {
+        use_hairpin_snat_ip = true;
+    }
+
+    if (!use_hairpin_snat_ip) {
+        add_lb_ct_snat_hairpin_for_dp(lb, NULL, NULL,
+                                      &match, &ofpacts, flow_table);
+    } else {
+        for (size_t i = 0; i < lb->slb->n_datapaths; i++) {
+            add_lb_ct_snat_hairpin_for_dp(lb, lb->slb->datapaths[i],
+                                          local_datapaths,
+                                          &match, &ofpacts, flow_table);
+        }
+        if (lb->slb->datapath_group) {
+            for (size_t i = 0; i < lb->slb->datapath_group->n_datapaths; i++) {
+                add_lb_ct_snat_hairpin_for_dp(
+                    lb, lb->slb->datapath_group->datapaths[i],
+                    local_datapaths, &match, &ofpacts, flow_table);
+            }
+        }
+    }
 
+    ofpbuf_uninit(&ofpacts);
 }
 
 /* When a packet is sent to a LB VIP from a backend and the LB selects that
@@ -1960,13 +1881,10 @@
  * the LB entry in the NBDB.
  *
  * add_lb_ct_snat_hairpin_flows() adds OpenFlow flows for each LB in order to
- * achieve this behaviour.
- *
- * Note: 'conjunctive_id' must be a unique identifier for each LB as it is used
- * as a conjunctive flow id. */
+ * achieve this behaviour. */
 static void
 add_lb_ct_snat_hairpin_flows(const struct ovn_controller_lb *lb,
-                             uint32_t conjunctive_id,
+                             const struct hmap *local_datapaths,
                              struct ovn_desired_flow_table *flow_table)
 {
     /* We must add a flow for each LB VIP. In the general case, this flow
@@ -1988,10 +1906,9 @@
        above we do not need to add an OpenFlow flow for each datapath. However,
        if one LB has specified "hairpin_snat_ip", then we need to SNAT that LB
        using the "hairpin_snat_ip" address rather than the VIP. In order to
-       achieve that, we can use a conjunctive flow that matches on any VIPs
-       from the "hairpin_snat_ip" LB and any datapath on which this LB is
-       added. This conjuctive flow can then SNAT using the "hairpin_snat_ip" IP
-       address rather than the LB VIP.
+       achieve that, we need to add a datapath metadata match.  These flows
+       will match on a subset of fields of more general flows, generated for a
+       case without "hairpin_snat_ip", so they need to have a higher priority.
 
        There is another potential exception. Consider the case in which we have
        two LBs which both have "hairpin_snat_ip" set. If these LBs have
@@ -2001,23 +1918,17 @@
        same VIP should not be added to the same datapath. */
 
     for (int i = 0; i < lb->n_vips; i++) {
-        struct ovn_lb_vip *lb_vip = &lb->vips[i];
-        add_lb_ct_snat_hairpin_vip_flow(lb, conjunctive_id,
-                                        lb_vip, flow_table);
+        add_lb_ct_snat_hairpin_vip_flow(lb, &lb->vips[i], local_datapaths,
+                                        flow_table);
     }
-
-    add_lb_ct_snat_hairpin_dp_flows(lb, conjunctive_id, flow_table);
 }
 
 static void
 consider_lb_hairpin_flows(const struct ovn_controller_lb *lb,
+                          const struct hmap *local_datapaths,
                           bool use_ct_mark,
-                          struct ovn_desired_flow_table *flow_table,
-                          struct simap *ids)
+                          struct ovn_desired_flow_table *flow_table)
 {
-    int id = simap_get(ids, lb->slb->name);
-    VLOG_DBG("Load Balancer %s has conjunctive flow id %u", lb->slb->name, id);
-
     for (size_t i = 0; i < lb->n_vips; i++) {
         struct ovn_lb_vip *lb_vip = &lb->vips[i];
 
@@ -2029,37 +1940,21 @@
         }
     }
 
-    add_lb_ct_snat_hairpin_flows(lb, id, flow_table);
+    add_lb_ct_snat_hairpin_flows(lb, local_datapaths, flow_table);
 }
 
 /* Adds OpenFlow flows to flow tables for each Load balancer VIPs and
  * backends to handle the load balanced hairpin traffic. */
 static void
 add_lb_hairpin_flows(const struct hmap *local_lbs,
+                     const struct hmap *local_datapaths,
                      bool use_ct_mark,
-                     struct ovn_desired_flow_table *flow_table,
-                     struct simap *ids,
-                     struct id_pool *pool)
+                     struct ovn_desired_flow_table *flow_table)
 {
-    uint32_t id;
     const struct ovn_controller_lb *lb;
     HMAP_FOR_EACH (lb, hmap_node, local_lbs) {
-        /* Allocate a unique 32-bit integer to this load-balancer. This will
-         * be used as a conjunctive flow id in the OFTABLE_CT_SNAT_HAIRPIN
-         * table.
-         *
-         * If we are unable to allocate a unique ID then we have run out of
-         * ids. As this is unrecoverable then we abort. However, this is
-         * unlikely to happen as it would be mean that we have created
-         * "UINT32_MAX" load-balancers.
-         */
-
-        id = simap_get(ids, lb->slb->name);
-        if (!id) {
-            ovs_assert(id_pool_alloc_id(pool, &id));
-            simap_put(ids, lb->slb->name, id);
-        }
-        consider_lb_hairpin_flows(lb, use_ct_mark, flow_table, ids);
+        consider_lb_hairpin_flows(lb, local_datapaths,
+                                  use_ct_mark, flow_table);
     }
 }
 
@@ -2196,10 +2091,9 @@
                        l_ctx_in->local_datapaths,
                        l_ctx_out->flow_table);
     add_lb_hairpin_flows(l_ctx_in->local_lbs,
+                         l_ctx_in->local_datapaths,
                          l_ctx_in->lb_hairpin_use_ct_mark,
-                         l_ctx_out->flow_table,
-                         l_ctx_out->hairpin_lb_ids,
-                         l_ctx_out->hairpin_id_pool);
+                         l_ctx_out->flow_table);
     add_fdb_flows(l_ctx_in->fdb_table, l_ctx_in->local_datapaths,
                   l_ctx_out->flow_table);
     add_port_sec_flows(l_ctx_in->binding_lports, l_ctx_in->chassis,
@@ -2423,9 +2317,6 @@
 {
     const struct ovn_controller_lb *lb;
 
-    struct id_pool *pool = l_ctx_out->hairpin_id_pool;
-    struct simap *ids = l_ctx_out->hairpin_lb_ids;
-
     struct uuidset_node *uuid_node;
     UUIDSET_FOR_EACH (uuid_node, deleted_lbs) {
         lb = ovn_controller_lb_find(old_lbs, &uuid_node->uuid);
@@ -2433,8 +2324,6 @@
         VLOG_DBG("Remove hairpin flows for deleted load balancer "UUID_FMT,
                  UUID_ARGS(&uuid_node->uuid));
         ofctrl_remove_flows(l_ctx_out->flow_table, &uuid_node->uuid);
-        id_pool_free_id(pool, simap_get(ids, lb->slb->name));
-        simap_find_and_delete(ids, lb->slb->name);
     }
 
     UUIDSET_FOR_EACH (uuid_node, updated_lbs) {
@@ -2443,32 +2332,19 @@
         VLOG_DBG("Remove and add hairpin flows for updated load balancer "
                   UUID_FMT, UUID_ARGS(&uuid_node->uuid));
         ofctrl_remove_flows(l_ctx_out->flow_table, &uuid_node->uuid);
-        consider_lb_hairpin_flows(lb, l_ctx_in->lb_hairpin_use_ct_mark,
-                                  l_ctx_out->flow_table,
-                                  l_ctx_out->hairpin_lb_ids);
+        consider_lb_hairpin_flows(lb, l_ctx_in->local_datapaths,
+                                  l_ctx_in->lb_hairpin_use_ct_mark,
+                                  l_ctx_out->flow_table);
     }
 
     UUIDSET_FOR_EACH (uuid_node, new_lbs) {
         lb = ovn_controller_lb_find(l_ctx_in->local_lbs, &uuid_node->uuid);
 
-        /* Allocate a unique 32-bit integer to this load-balancer. This
-         * will be used as a conjunctive flow id in the
-         * OFTABLE_CT_SNAT_HAIRPIN table.
-         *
-         * If we are unable to allocate a unique ID then we have run out of
-         * ids. As this is unrecoverable then we abort. However, this is
-         * unlikely to happen as it would be mean that we have created
-         * "UINT32_MAX" load-balancers.
-         */
-        uint32_t id;
-        ovs_assert(id_pool_alloc_id(pool, &id));
-        simap_put(ids, lb->slb->name, id);
-
         VLOG_DBG("Add load balancer hairpin flows for "UUID_FMT,
                  UUID_ARGS(&uuid_node->uuid));
-        consider_lb_hairpin_flows(lb, l_ctx_in->lb_hairpin_use_ct_mark,
-                                  l_ctx_out->flow_table,
-                                  l_ctx_out->hairpin_lb_ids);
+        consider_lb_hairpin_flows(lb, l_ctx_in->local_datapaths,
+                                  l_ctx_in->lb_hairpin_use_ct_mark,
+                                  l_ctx_out->flow_table);
     }
 
     return true;
diff -Nru ovn-23.03.0~git20230221.038cfb1/controller/lflow.h ovn-23.03.0/controller/lflow.h
--- ovn-23.03.0~git20230221.038cfb1/controller/lflow.h	2023-02-17 21:40:31.000000000 +0000
+++ ovn-23.03.0/controller/lflow.h	2023-03-03 18:37:48.000000000 +0000
@@ -128,8 +128,6 @@
     struct lflow_cache *lflow_cache;
     struct conj_ids *conj_ids;
     struct uuidset *objs_processed;
-    struct simap *hairpin_lb_ids;
-    struct id_pool *hairpin_id_pool;
 };
 
 void lflow_init(void);
diff -Nru ovn-23.03.0~git20230221.038cfb1/controller/ovn-controller.c ovn-23.03.0/controller/ovn-controller.c
--- ovn-23.03.0~git20230221.038cfb1/controller/ovn-controller.c	2023-02-17 21:40:31.000000000 +0000
+++ ovn-23.03.0/controller/ovn-controller.c	2023-03-03 18:37:48.000000000 +0000
@@ -3264,11 +3264,6 @@
     struct lflow_cache *lflow_cache;
 };
 
-struct lflow_output_hairpin_data {
-    struct id_pool *pool;
-    struct simap   ids;
-};
-
 struct ed_type_lflow_output {
     /* Logical flow table */
     struct ovn_desired_flow_table flow_table;
@@ -3290,9 +3285,6 @@
      * full recompute. */
     struct lflow_output_persistent_data pd;
 
-    /* Data for managing hairpin flow conjunctive flow ids. */
-    struct lflow_output_hairpin_data hd;
-
     /* Fixed neighbor discovery supported options. */
     struct hmap nd_ra_opts;
 
@@ -3448,8 +3440,6 @@
     l_ctx_out->conj_ids = &fo->conj_ids;
     l_ctx_out->objs_processed = &fo->objs_processed;
     l_ctx_out->lflow_cache = fo->pd.lflow_cache;
-    l_ctx_out->hairpin_id_pool = fo->hd.pool;
-    l_ctx_out->hairpin_lb_ids = &fo->hd.ids;
 }
 
 static void *
@@ -3463,8 +3453,6 @@
     objdep_mgr_init(&data->lflow_deps_mgr);
     lflow_conj_ids_init(&data->conj_ids);
     uuidset_init(&data->objs_processed);
-    simap_init(&data->hd.ids);
-    data->hd.pool = id_pool_create(1, UINT32_MAX - 1);
     nd_ra_opts_init(&data->nd_ra_opts);
     controller_event_opts_init(&data->controller_event_opts);
     flow_collector_ids_init(&data->collector_ids);
@@ -3489,8 +3477,6 @@
     lflow_conj_ids_destroy(&flow_output_data->conj_ids);
     uuidset_destroy(&flow_output_data->objs_processed);
     lflow_cache_destroy(flow_output_data->pd.lflow_cache);
-    simap_destroy(&flow_output_data->hd.ids);
-    id_pool_destroy(flow_output_data->hd.pool);
     nd_ra_opts_destroy(&flow_output_data->nd_ra_opts);
     controller_event_opts_destroy(&flow_output_data->controller_event_opts);
     flow_collector_ids_destroy(&flow_output_data->collector_ids);
diff -Nru ovn-23.03.0~git20230221.038cfb1/controller/physical.c ovn-23.03.0/controller/physical.c
--- ovn-23.03.0~git20230221.038cfb1/controller/physical.c	2023-02-17 21:40:31.000000000 +0000
+++ ovn-23.03.0/controller/physical.c	2023-03-03 18:37:48.000000000 +0000
@@ -505,7 +505,7 @@
         const char *tokens
             = get_chassis_mac_mappings(&chassis->other_config, chassis->name);
 
-        if (!strlen(tokens)) {
+        if (!tokens[0]) {
             continue;
         }
 
diff -Nru ovn-23.03.0~git20230221.038cfb1/controller/pinctrl.c ovn-23.03.0/controller/pinctrl.c
--- ovn-23.03.0~git20230221.038cfb1/controller/pinctrl.c	2023-02-17 21:40:31.000000000 +0000
+++ ovn-23.03.0/controller/pinctrl.c	2023-03-03 18:37:48.000000000 +0000
@@ -3775,12 +3775,12 @@
 
     const char *dnssl = smap_get(&pb->options, "ipv6_ra_dnssl");
     if (dnssl) {
-        ds_put_buffer(&config->dnssl, dnssl, strlen(dnssl));
+        ds_put_cstr(&config->dnssl, dnssl);
     }
 
     const char *route_info = smap_get(&pb->options, "ipv6_ra_route_info");
     if (route_info) {
-        ds_put_buffer(&config->route_info, route_info, strlen(route_info));
+        ds_put_cstr(&config->route_info, route_info);
     }
 
     return config;
@@ -5825,7 +5825,7 @@
     int ofs;
     if (!extract_addresses(addresses, laddrs, &ofs)) {
         return false;
-    } else if (ofs >= strlen(addresses)) {
+    } else if (!addresses[ofs]) {
         return true;
     }
 
diff -Nru ovn-23.03.0~git20230221.038cfb1/debian/changelog ovn-23.03.0/debian/changelog
--- ovn-23.03.0~git20230221.038cfb1/debian/changelog	2023-02-22 13:32:19.000000000 +0000
+++ ovn-23.03.0/debian/changelog	2023-03-06 08:14:26.000000000 +0000
@@ -1,3 +1,10 @@
+ovn (23.03.0-1) unstable; urgency=medium
+
+  * Team upload.
+  * Update upstream source from tag 'upstream/23.03.0'
+
+ -- Frode Nordahl <frode.nordahl@canonical.com>  Mon, 06 Mar 2023 09:14:26 +0100
+
 ovn (23.03.0~git20230221.038cfb1-1) unstable; urgency=medium
 
   * Team upload.
diff -Nru ovn-23.03.0~git20230221.038cfb1/.github/workflows/ovn-kubernetes.yml ovn-23.03.0/.github/workflows/ovn-kubernetes.yml
--- ovn-23.03.0~git20230221.038cfb1/.github/workflows/ovn-kubernetes.yml	2023-02-17 21:40:31.000000000 +0000
+++ ovn-23.03.0/.github/workflows/ovn-kubernetes.yml	2023-03-03 18:37:48.000000000 +0000
@@ -16,7 +16,7 @@
   GO_VERSION: "1.18.4"
   K8S_VERSION: v1.24.0
   OVNKUBE_COMMIT: "master"
-  LIBOVSDB_COMMIT: "98c0bad3cff1"
+  LIBOVSDB_COMMIT: "a6a173993830"
   KIND_CLUSTER_NAME: ovn
   KIND_INSTALL_INGRESS: true
   KIND_ALLOW_SYSTEM_WRITES: true
diff -Nru ovn-23.03.0~git20230221.038cfb1/ic/ovn-ic.c ovn-23.03.0/ic/ovn-ic.c
--- ovn-23.03.0~git20230221.038cfb1/ic/ovn-ic.c	2023-02-17 21:40:31.000000000 +0000
+++ ovn-23.03.0/ic/ovn-ic.c	2023-03-03 18:37:48.000000000 +0000
@@ -1162,7 +1162,7 @@
             ipv6_format_addr(&nexthop, &msg);
         }
 
-        ds_put_format(&msg, ", route_table: %s", strlen(nb_route->route_table)
+        ds_put_format(&msg, ", route_table: %s", nb_route->route_table[0]
                                                  ? nb_route->route_table
                                                  : "<main>");
 
@@ -1348,7 +1348,7 @@
                 continue;
             }
 
-            if (strlen(isb_route->route_table) &&
+            if (isb_route->route_table[0] &&
                 strcmp(isb_route->route_table, ts_route_table)) {
                 if (VLOG_IS_DBG_ENABLED()) {
                     VLOG_DBG("Skip learning static route %s -> %s as either "
diff -Nru ovn-23.03.0~git20230221.038cfb1/lib/ovn-util.c ovn-23.03.0/lib/ovn-util.c
--- ovn-23.03.0~git20230221.038cfb1/lib/ovn-util.c	2023-02-17 21:40:31.000000000 +0000
+++ ovn-23.03.0/lib/ovn-util.c	2023-03-03 18:37:48.000000000 +0000
@@ -105,18 +105,34 @@
     char ipv6_s[IPV6_SCAN_LEN + 1];
     struct eth_addr ea;
     ovs_be32 ip;
-    int n;
-    return (!strcmp(address, "dynamic")
-            || (ovs_scan(address, "dynamic "IP_SCAN_FMT"%n",
-                         IP_SCAN_ARGS(&ip), &n)
-                         && address[n] == '\0')
-            || (ovs_scan(address, "dynamic "IP_SCAN_FMT" "IPV6_SCAN_FMT"%n",
-                         IP_SCAN_ARGS(&ip), ipv6_s, &n)
-                         && address[n] == '\0')
-            || (ovs_scan(address, "dynamic "IPV6_SCAN_FMT"%n",
-                         ipv6_s, &n) && address[n] == '\0')
-            || (ovs_scan(address, ETH_ADDR_SCAN_FMT" dynamic%n",
-                         ETH_ADDR_SCAN_ARGS(ea), &n) && address[n] == '\0'));
+    int n = 0;
+
+    if (!strncmp(address, "dynamic", 7)) {
+        n = 7;
+        if (!address[n]) {
+            /* "dynamic" */
+            return true;
+        }
+        if (ovs_scan_len(address, &n, " "IP_SCAN_FMT, IP_SCAN_ARGS(&ip))
+            && !address[n]) {
+            /* "dynamic x.x.x.x" */
+            return true;
+        }
+        if (ovs_scan_len(address, &n, " "IPV6_SCAN_FMT, ipv6_s)
+            && !address[n]) {
+            /* Either "dynamic xxxx::xxxx" or "dynamic x.x.x.x xxxx::xxxx". */
+            return true;
+        }
+        return false;
+    }
+
+    if (ovs_scan_len(address, &n, ETH_ADDR_SCAN_FMT" dynamic",
+                     ETH_ADDR_SCAN_ARGS(ea)) && !address[n]) {
+        /* "xx:xx:xx:xx:xx:xx dynamic" */
+        return true;
+    }
+
+    return false;
 }
 
 static bool
@@ -128,7 +144,6 @@
     const char *buf = address;
     const char *const start = buf;
     int buf_index = 0;
-    const char *buf_end = buf + strlen(address);
 
     if (extract_eth_addr) {
         if (!ovs_scan_len(buf, &buf_index, ETH_ADDR_SCAN_FMT,
@@ -151,7 +166,7 @@
      * and store in the 'laddrs'. Break the loop if invalid data is found.
      */
     buf += buf_index;
-    while (buf < buf_end) {
+    while (*buf != '\0') {
         buf_index = 0;
         error = ip_parse_cidr_len(buf, &buf_index, &ip4, &plen);
         if (!error) {
@@ -205,7 +220,7 @@
     int ofs;
     bool success = extract_addresses(address, laddrs, &ofs);
 
-    if (success && ofs < strlen(address)) {
+    if (success && address[ofs]) {
         static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
         VLOG_INFO_RL(&rl, "invalid syntax '%s' in address", address);
     }
@@ -825,24 +840,6 @@
                      N_OVNACTS, OVN_INTERNAL_MINOR_VER);
 }
 
-unsigned int
-ovn_parse_internal_version_minor(const char *ver)
-{
-    const char *p = ver + strlen(ver);
-    for (int i = 0; i < strlen(ver); i++) {
-        if (*p == '.') {
-            break;
-        }
-        p--;
-    }
-
-    unsigned int minor;
-    if (ovs_scan(p, ".%u", &minor)) {
-        return minor;
-    }
-    return 0;
-}
-
 #ifdef DDLOG
 /* Callbacks used by the ddlog northd code to print warnings and errors. */
 void
diff -Nru ovn-23.03.0~git20230221.038cfb1/lib/ovn-util.h ovn-23.03.0/lib/ovn-util.h
--- ovn-23.03.0~git20230221.038cfb1/lib/ovn-util.h	2023-02-17 21:40:31.000000000 +0000
+++ ovn-23.03.0/lib/ovn-util.h	2023-03-03 18:37:48.000000000 +0000
@@ -266,11 +266,6 @@
  * value. */
 char *ovn_get_internal_version(void);
 
-/* Parse the provided internal version string and return the "minor" part which
- * is expected to be an unsigned integer followed by the last "." in the
- * string. Returns 0 if the string can't be parsed. */
-unsigned int ovn_parse_internal_version_minor(const char *ver);
-
 /* OVN Packet definitions. These may eventually find a home in OVS's
  * packets.h file. For the time being, they live here because OVN uses them
  * and OVS does not.
diff -Nru ovn-23.03.0~git20230221.038cfb1/NEWS ovn-23.03.0/NEWS
--- ovn-23.03.0~git20230221.038cfb1/NEWS	2023-02-17 21:40:31.000000000 +0000
+++ ovn-23.03.0/NEWS	2023-03-03 18:37:48.000000000 +0000
@@ -1,4 +1,4 @@
-OVN v23.03.0 - xx xxx xxxx
+OVN v23.03.0 - 03 Mar 2023
 --------------------------
   - ovn-controller: Experimental support for co-hosting multiple controller
     instances on the same host.
diff -Nru ovn-23.03.0~git20230221.038cfb1/northd/northd.c ovn-23.03.0/northd/northd.c
--- ovn-23.03.0~git20230221.038cfb1/northd/northd.c	2023-02-17 21:40:31.000000000 +0000
+++ ovn-23.03.0/northd/northd.c	2023-03-03 18:37:48.000000000 +0000
@@ -1497,7 +1497,10 @@
     const struct nbrec_logical_switch_port *nbsp; /* May be NULL. */
 
     struct lport_addresses *lsp_addrs;  /* Logical switch port addresses. */
-    unsigned int n_lsp_addrs;
+    unsigned int n_lsp_addrs;  /* Total length of lsp_addrs. */
+    unsigned int n_lsp_non_router_addrs; /* Number of elements from the
+                                          * beginning of 'lsp_addrs' extracted
+                                          * directly from LSP 'addresses'. */
 
     struct lport_addresses *ps_addrs;   /* Port security addresses. */
     unsigned int n_ps_addrs;
@@ -1817,35 +1820,21 @@
 }
 
 static void
-ipam_insert_lsp_addresses(struct ovn_datapath *od, struct ovn_port *op,
-                          char *address)
+ipam_insert_lsp_addresses(struct ovn_datapath *od,
+                          struct lport_addresses *laddrs)
 {
-    if (!od || !op || !address || !strcmp(address, "unknown")
-        || !strcmp(address, "router") || is_dynamic_lsp_address(address)) {
-        return;
-    }
-
-    struct lport_addresses laddrs;
-    if (!extract_lsp_addresses(address, &laddrs)) {
-        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
-        VLOG_WARN_RL(&rl, "Extract addresses failed.");
-        return;
-    }
-    ipam_insert_mac(&laddrs.ea, true);
+    ipam_insert_mac(&laddrs->ea, true);
 
     /* IP is only added to IPAM if the switch's subnet option
      * is set, whereas MAC is always added to MACAM. */
     if (!od->ipam_info.allocated_ipv4s) {
-        destroy_lport_addresses(&laddrs);
         return;
     }
 
-    for (size_t j = 0; j < laddrs.n_ipv4_addrs; j++) {
-        uint32_t ip = ntohl(laddrs.ipv4_addrs[j].addr);
+    for (size_t j = 0; j < laddrs->n_ipv4_addrs; j++) {
+        uint32_t ip = ntohl(laddrs->ipv4_addrs[j].addr);
         ipam_insert_ip_for_datapath(od, ip);
     }
-
-    destroy_lport_addresses(&laddrs);
 }
 
 static void
@@ -1855,29 +1844,21 @@
         return;
     }
 
-    if (op->nbsp) {
+    if (op->n_lsp_non_router_addrs) {
         /* Add all the port's addresses to address data structures. */
-        for (size_t i = 0; i < op->nbsp->n_addresses; i++) {
-            ipam_insert_lsp_addresses(od, op, op->nbsp->addresses[i]);
-        }
-    } else if (op->nbrp) {
-        struct lport_addresses lrp_networks;
-        if (!extract_lrp_networks(op->nbrp, &lrp_networks)) {
-            static struct vlog_rate_limit rl
-                = VLOG_RATE_LIMIT_INIT(1, 1);
-            VLOG_WARN_RL(&rl, "Extract addresses failed.");
-            return;
+        for (size_t i = 0; i < op->n_lsp_non_router_addrs; i++) {
+            ipam_insert_lsp_addresses(od, &op->lsp_addrs[i]);
         }
-        ipam_insert_mac(&lrp_networks.ea, true);
+    } else if (op->lrp_networks.ea_s[0]) {
+        ipam_insert_mac(&op->lrp_networks.ea, true);
 
         if (!op->peer || !op->peer->nbsp || !op->peer->od || !op->peer->od->nbs
             || !smap_get(&op->peer->od->nbs->other_config, "subnet")) {
-            destroy_lport_addresses(&lrp_networks);
             return;
         }
 
-        for (size_t i = 0; i < lrp_networks.n_ipv4_addrs; i++) {
-            uint32_t ip = ntohl(lrp_networks.ipv4_addrs[i].addr);
+        for (size_t i = 0; i < op->lrp_networks.n_ipv4_addrs; i++) {
+            uint32_t ip = ntohl(op->lrp_networks.ipv4_addrs[i].addr);
             /* If the router has the first IP address of the subnet, don't add
              * it to IPAM. We already added this when we initialized IPAM for
              * the datapath. This will just result in an erroneous message
@@ -1887,8 +1868,6 @@
                 ipam_insert_ip_for_datapath(op->peer->od, ip);
             }
         }
-
-        destroy_lport_addresses(&lrp_networks);
     }
 }
 
@@ -2573,6 +2552,7 @@
                     }
                     op->n_lsp_addrs++;
                 }
+                op->n_lsp_non_router_addrs = op->n_lsp_addrs;
 
                 op->ps_addrs
                     = xmalloc(sizeof *op->ps_addrs * nbsp->n_port_security);
@@ -7179,9 +7159,9 @@
     ds_put_format(&aff_action, "%s = %s; ", reg_vip, lb_vip->vip_str);
     ds_put_cstr(&aff_action_learn, "commit_lb_aff(vip = \"");
 
-    if (lb_vip->vip_port) {
-        ds_put_format(&aff_action_learn, ipv6 ? "[%s]:%"PRIu16 : "%s:%"PRIu16,
-                      lb_vip->vip_str, lb_vip->vip_port);
+    if (lb_vip->port_str) {
+        ds_put_format(&aff_action_learn, ipv6 ? "[%s]:%s" : "%s:%s",
+                      lb_vip->vip_str, lb_vip->port_str);
     } else {
         ds_put_cstr(&aff_action_learn, lb_vip->vip_str);
     }
@@ -7193,12 +7173,12 @@
     ds_put_cstr(&aff_action_learn, "\", backend = \"");
 
     /* Prepare common part of affinity learn match. */
-    if (lb_vip->vip_port) {
+    if (lb_vip->port_str) {
         ds_put_format(&aff_match_learn, REGBIT_KNOWN_LB_SESSION" == 0 && "
                       "ct.new && %s && %s == %s && "
-                      REG_ORIG_TP_DPORT_ROUTER" == %"PRIu16" && "
+                      REG_ORIG_TP_DPORT_ROUTER" == %s && "
                       "%s.dst == ", ip_match, reg_vip, lb_vip->vip_str,
-                      lb_vip->vip_port, ip_match);
+                      lb_vip->port_str, ip_match);
     } else {
         ds_put_format(&aff_match_learn, REGBIT_KNOWN_LB_SESSION" == 0 && "
                       "ct.new && %s && %s == %s && %s.dst == ", ip_match,
@@ -7244,7 +7224,7 @@
         ds_put_cstr(&aff_action, ");");
         ds_put_char(&aff_action_learn, '"');
 
-        if (lb_vip->vip_port) {
+        if (lb_vip->port_str) {
             ds_put_format(&aff_action_learn, ", proto = %s", lb->proto);
         }
 
@@ -7334,9 +7314,9 @@
                       lb_vip->vip_str);
     }
 
-    if (lb_vip->vip_port) {
-        ds_put_format(&new_lb_match, " && "REG_ORIG_TP_DPORT " == %"PRIu16,
-                      lb_vip->vip_port);
+    if (lb_vip->port_str) {
+        ds_put_format(&new_lb_match, " && "REG_ORIG_TP_DPORT " == %s",
+                      lb_vip->port_str);
     }
 
     static char *aff_check = REGBIT_KNOWN_LB_SESSION" = chk_lb_aff(); next;";
@@ -7363,11 +7343,11 @@
                   reg_vip, lb_vip->vip_str);
     ds_put_cstr(&aff_action_learn, "commit_lb_aff(vip = \"");
 
-    if (lb_vip->vip_port) {
-        ds_put_format(&aff_action, REG_ORIG_TP_DPORT" = %"PRIu16"; ",
-                      lb_vip->vip_port);
-        ds_put_format(&aff_action_learn, ipv6 ? "[%s]:%"PRIu16 : "%s:%"PRIu16,
-                      lb_vip->vip_str, lb_vip->vip_port);
+    if (lb_vip->port_str) {
+        ds_put_format(&aff_action, REG_ORIG_TP_DPORT" = %s; ",
+                      lb_vip->port_str);
+        ds_put_format(&aff_action_learn, ipv6 ? "[%s]:%s" : "%s:%s",
+                      lb_vip->vip_str, lb_vip->port_str);
     } else {
         ds_put_cstr(&aff_action_learn, lb_vip->vip_str);
     }
@@ -7376,12 +7356,12 @@
     ds_put_cstr(&aff_action_learn, "\", backend = \"");
 
     /* Prepare common part of affinity learn match. */
-    if (lb_vip->vip_port) {
+    if (lb_vip->port_str) {
         ds_put_format(&aff_match_learn, REGBIT_KNOWN_LB_SESSION" == 0 && "
                       "ct.new && %s && %s == %s && "
-                      REG_ORIG_TP_DPORT" == %"PRIu16" && %s.dst == ",
+                      REG_ORIG_TP_DPORT" == %s && %s.dst == ",
                       ip_match, reg_vip, lb_vip->vip_str,
-                      lb_vip->vip_port, ip_match);
+                      lb_vip->port_str, ip_match);
     } else {
         ds_put_format(&aff_match_learn, REGBIT_KNOWN_LB_SESSION" == 0 && "
                       "ct.new && %s && %s == %s && %s.dst == ",
@@ -7422,7 +7402,7 @@
         ds_put_cstr(&aff_action, ");");
         ds_put_char(&aff_action_learn, '"');
 
-        if (lb_vip->vip_port) {
+        if (lb_vip->port_str) {
             ds_put_format(&aff_action_learn, ", proto = %s", lb->proto);
         }
 
@@ -7856,7 +7836,7 @@
 }
 
 /*
- * Ingress table 24: Flows that flood self originated ARP/RARP/ND packets in
+ * Ingress table 25: Flows that flood self originated ARP/RARP/ND packets in
  * the switching domain.
  */
 static void
@@ -7970,7 +7950,7 @@
 }
 
 /*
- * Ingress table 24: Flows that forward ARP/ND requests only to the routers
+ * Ingress table 25: Flows that forward ARP/ND requests only to the routers
  * that own the addresses. Other ARP/ND packets are still flooded in the
  * switching domain as regular broadcast.
  */
@@ -8007,7 +7987,7 @@
 }
 
 /*
- * Ingress table 24: Flows that forward ARP/ND requests only to the routers
+ * Ingress table 25: Flows that forward ARP/ND requests only to the routers
  * that own the addresses.
  * Priorities:
  * - 80: self originated GARPs that need to follow regular processing.
@@ -8336,7 +8316,8 @@
 
     struct ovn_datapath *od;
 
-    /* Ingress table 25: Destination lookup for unknown MACs (priority 0). */
+    /* Ingress table 25/26: Destination lookup for unknown MACs
+     * (priority 0). */
     HMAP_FOR_EACH (od, key_node, datapaths) {
         if (!od->nbs) {
             continue;
@@ -8411,7 +8392,7 @@
     }
 }
 
-/* Ingress table 18: ARP/ND responder, skip requests coming from localnet
+/* Ingress table 19: ARP/ND responder, skip requests coming from localnet
  * ports. (priority 100); see ovn-northd.8.xml for the rationale. */
 
 static void
@@ -8429,7 +8410,7 @@
     }
 }
 
-/* Ingress table 18: ARP/ND responder, reply for known IPs.
+/* Ingress table 19: ARP/ND responder, reply for known IPs.
  * (priority 50). */
 static void
 build_lswitch_arp_nd_responder_known_ips(struct ovn_port *op,
@@ -8689,7 +8670,7 @@
     }
 }
 
-/* Ingress table 18: ARP/ND responder, by default goto next.
+/* Ingress table 19: ARP/ND responder, by default goto next.
  * (priority 0)*/
 static void
 build_lswitch_arp_nd_responder_default(struct ovn_datapath *od,
@@ -8700,7 +8681,7 @@
     }
 }
 
-/* Ingress table 18: ARP/ND responder for service monitor source ip.
+/* Ingress table 19: ARP/ND responder for service monitor source ip.
  * (priority 110)*/
 static void
 build_lswitch_arp_nd_service_monitor(struct ovn_northd_lb *lb,
@@ -8769,7 +8750,7 @@
 }
 
 
-/* Logical switch ingress table 19 and 20: DHCP options and response
+/* Logical switch ingress table 20 and 21: DHCP options and response
  * priority 100 flows. */
 static void
 build_lswitch_dhcp_options_and_response(struct ovn_port *op,
@@ -8821,11 +8802,11 @@
     }
 }
 
-/* Ingress table 19 and 20: DHCP options and response, by default goto
+/* Ingress table 20 and 21: DHCP options and response, by default goto
  * next. (priority 0).
- * Ingress table 21 and 22: DNS lookup and response, by default goto next.
+ * Ingress table 22 and 23: DNS lookup and response, by default goto next.
  * (priority 0).
- * Ingress table 23 - External port handling, by default goto next.
+ * Ingress table 24 - External port handling, by default goto next.
  * (priority 0). */
 static void
 build_lswitch_dhcp_and_dns_defaults(struct ovn_datapath *od,
@@ -8840,7 +8821,7 @@
     }
 }
 
-/* Logical switch ingress table 21 and 22: DNS lookup and response
+/* Logical switch ingress table 22 and 23: DNS lookup and response
 * priority 100 flows.
 */
 static void
@@ -8868,7 +8849,7 @@
     }
 }
 
-/* Table 23: External port. Drop ARP request for router ips from
+/* Table 24: External port. Drop ARP request for router ips from
  * external ports  on chassis not binding those ports.
  * This makes the router pipeline to be run only on the chassis
  * binding the external ports. */
@@ -8885,7 +8866,7 @@
     }
 }
 
-/* Ingress table 24: Destination lookup, broadcast and multicast handling
+/* Ingress table 25: Destination lookup, broadcast and multicast handling
  * (priority 70 - 100). */
 static void
 build_lswitch_destination_lookup_bmcast(struct ovn_datapath *od,
@@ -8970,7 +8951,7 @@
 }
 
 
-/* Ingress table 24: Add IP multicast flows learnt from IGMP/MLD
+/* Ingress table 25: Add IP multicast flows learnt from IGMP/MLD
  * (priority 90). */
 static void
 build_lswitch_ip_mcast_igmp_mld(struct ovn_igmp_group *igmp_group,
@@ -9054,7 +9035,7 @@
 
 static struct ovs_mutex mcgroup_mutex = OVS_MUTEX_INITIALIZER;
 
-/* Ingress table 24: Destination lookup, unicast handling (priority 50), */
+/* Ingress table 25: Destination lookup, unicast handling (priority 50), */
 static void
 build_lswitch_ip_unicast_lookup(struct ovn_port *op,
                                 struct hmap *lflows,
@@ -9623,7 +9604,7 @@
 static uint32_t
 get_route_table_id(struct simap *route_tables, const char *route_table_name)
 {
-    if (!route_table_name || !strlen(route_table_name)) {
+    if (!route_table_name || !route_table_name[0]) {
         return 0;
     }
 
@@ -9698,7 +9679,7 @@
     struct in6_addr nexthop;
     unsigned int plen;
     bool is_discard_route = !strcmp(route->nexthop, "discard");
-    bool valid_nexthop = strlen(route->nexthop) && !is_discard_route;
+    bool valid_nexthop = route->nexthop[0] && !is_discard_route;
     if (valid_nexthop) {
         if (!ip46_parse_cidr(route->nexthop, &nexthop, &plen)) {
             static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
@@ -9989,7 +9970,7 @@
                          route->output_port, route->ip_prefix);
             return false;
         }
-        if (strlen(route->nexthop)) {
+        if (route->nexthop[0]) {
             lrp_addr_s = find_lrp_member_ip(out_port, route->nexthop);
         }
         if (!lrp_addr_s) {
@@ -10021,7 +10002,7 @@
                 continue;
             }
 
-            if (strlen(route->nexthop)) {
+            if (route->nexthop[0]) {
                 lrp_addr_s = find_lrp_member_ip(out_port, route->nexthop);
             }
             if (lrp_addr_s) {
@@ -10337,7 +10318,7 @@
     } else {
         ds_put_format(&common_actions, REG_ECMP_GROUP_ID" = 0; %s = ",
                       is_ipv4 ? REG_NEXT_HOP_IPV4 : REG_NEXT_HOP_IPV6);
-        if (gateway && strlen(gateway)) {
+        if (gateway && gateway[0]) {
             ds_put_cstr(&common_actions, gateway);
         } else {
             ds_put_format(&common_actions, "ip%s.dst", is_ipv4 ? "4" : "6");
@@ -10622,10 +10603,10 @@
      */
     ds_put_format(match, "ct.new && !ct.rel && %s && %s == %s",
                   ip_match, ip_reg, lb_vip->vip_str);
-    if (lb_vip->vip_port) {
+    if (lb_vip->port_str) {
         prio = 120;
-        ds_put_format(match, " && %s && "REG_ORIG_TP_DPORT_ROUTER" == %d",
-                      lb->proto, lb_vip->vip_port);
+        ds_put_format(match, " && %s && "REG_ORIG_TP_DPORT_ROUTER" == %s",
+                      lb->proto, lb_vip->port_str);
     }
 
     ds_put_cstr(&est_match, "ct.est");
diff -Nru ovn-23.03.0~git20230221.038cfb1/ovn-vif/NEWS ovn-23.03.0/ovn-vif/NEWS
--- ovn-23.03.0~git20230221.038cfb1/ovn-vif/NEWS	2023-02-21 07:26:39.000000000 +0000
+++ ovn-23.03.0/ovn-vif/NEWS	2023-03-06 08:02:58.000000000 +0000
@@ -1,4 +1,7 @@
-OVN VIF v23.03.0 - xx xxx xxxx
+Post 23.03
+----------
+
+OVN VIF v23.03.0 - 06 Mar 2023
 ------------------------------
 
 OVN VIF v22.12.0 - 19 Dec 2022
diff -Nru ovn-23.03.0~git20230221.038cfb1/tests/atlocal.in ovn-23.03.0/tests/atlocal.in
--- ovn-23.03.0~git20230221.038cfb1/tests/atlocal.in	2023-02-17 21:40:31.000000000 +0000
+++ ovn-23.03.0/tests/atlocal.in	2023-03-03 18:37:48.000000000 +0000
@@ -166,6 +166,9 @@
 # Set HAVE_TCPDUMP
 find_command tcpdump
 
+# Set HAVE_XXD
+find_command xxd
+
 # Set HAVE_LFTP
 find_command lftp
 
diff -Nru ovn-23.03.0~git20230221.038cfb1/tests/network-functions.at ovn-23.03.0/tests/network-functions.at
--- ovn-23.03.0~git20230221.038cfb1/tests/network-functions.at	2023-02-17 21:40:31.000000000 +0000
+++ ovn-23.03.0/tests/network-functions.at	2023-03-03 18:37:48.000000000 +0000
@@ -128,12 +128,18 @@
 # hex_to_binary HEXDIGITS
 #
 # Converts the pairs of HEXDIGITS into bytes and prints them on stdout.
-hex_to_binary() {
-    printf $(while test -n "$1"; do
-                 printf '\\%03o' 0x$(expr "$1" : '\(..\)')
-                 set -- "${1##??}"
-             done)
-}
+if test x$HAVE_XXD = xno; then
+    hex_to_binary() {
+        printf $(while test -n "$1"; do
+                     printf '\\%03o' 0x$(expr "$1" : '\(..\)')
+                     set -- "${1##??}"
+                 done)
+    }
+else
+    hex_to_binary() {
+        echo $1 | xxd -r -p
+    }
+fi
 
 # tcpdump_hex TITLE PACKET
 #
diff -Nru ovn-23.03.0~git20230221.038cfb1/tests/ovn.at ovn-23.03.0/tests/ovn.at
--- ovn-23.03.0~git20230221.038cfb1/tests/ovn.at	2023-02-17 21:40:31.000000000 +0000
+++ ovn-23.03.0/tests/ovn.at	2023-03-03 18:37:48.000000000 +0000
@@ -4467,7 +4467,12 @@
 done
 
 # Gracefully terminate daemons
-OVN_CLEANUP([hv1],[hv2],[vtep])
+
+OVN_CLEANUP_SBOX([hv1])
+OVN_CLEANUP_SBOX([hv2])
+OVS_WAIT_UNTIL([test `as vtep ovs-vsctl list-ports vtep_bfd | wc -l` -eq 0])
+OVN_CLEANUP([vtep])
+
 OVN_CLEANUP_VSWITCH([hv3])
 
 AT_CLEANUP
@@ -25286,8 +25291,10 @@
 AT_KEYWORDS([slowtest])
 
 ovn_init_ic_db
-n_az=5
-n_ts=5
+# The number needs to stay relatively low due to high memory consumption
+# with address sanitizers enabled.
+n_az=3
+n_ts=3
 for i in `seq 1 $n_az`; do
     ovn_start az$i
 done
diff -Nru ovn-23.03.0~git20230221.038cfb1/tests/system-ovn.at ovn-23.03.0/tests/system-ovn.at
--- ovn-23.03.0~git20230221.038cfb1/tests/system-ovn.at	2023-02-17 21:40:31.000000000 +0000
+++ ovn-23.03.0/tests/system-ovn.at	2023-03-03 18:37:48.000000000 +0000
@@ -5135,7 +5135,7 @@
     ip netns exec sw0-p1-rej nc -u 10.0.0.4 90 < foo
     c=$(cat sw0-p1-rej-icmp.pcap | grep \
 "10.0.0.4 > 10.0.0.3: ICMP 10.0.0.4 udp port 90 unreachable" | uniq | wc -l)
-    test $c -eq 1
+    test $c -ge 1
 ])
 
 kill $(pidof tcpdump)
@@ -5148,7 +5148,7 @@
     ip netns exec sw0-p1-rej nc -u 10.0.0.4 94 < foo
     c=$(cat sw0-p1-rej-icmp.pcap | grep \
 "10.0.0.4 > 10.0.0.3: ICMP 10.0.0.4 udp port 94 unreachable" | uniq | wc -l)
-    test $c -eq 1
+    test $c -ge 1
 ])
 kill $(pidof tcpdump)
 
@@ -5161,7 +5161,7 @@
     c=$(cat sw0-p2-rej-icmp6.pcap | grep \
 "IP6 aef0::3 > aef0::4: ICMP6, destination unreachable, unreachable port, \
 aef0::3 udp port 90" | uniq | wc -l)
-    test $c -eq 1
+    test $c -ge 1
 ])
 
 kill $(pidof tcpdump)
@@ -5175,7 +5175,7 @@
     c=$(cat sw0-p2-rej-icmp6.pcap | grep \
 "IP6 aef0::3 > aef0::4: ICMP6, destination unreachable, unreachable port, \
 aef0::3 udp port 94" | uniq | wc -l)
-    test $c -eq 1
+    test $c -ge 1
 ])
 kill $(pidof tcpdump)
 
@@ -5201,7 +5201,7 @@
     ip netns exec sw0-p1-rej nc -u 10.0.0.4 90 < foo
     c=$(cat sw0-p1-rej-icmp.pcap | grep \
 "10.0.0.4 > 10.0.0.3: ICMP 10.0.0.4 udp port 90 unreachable" | uniq | wc -l)
-    test $c -eq 1
+    test $c -ge 1
 ])
 
 kill $(pidof tcpdump)
@@ -5214,7 +5214,7 @@
     c=$(cat sw0-p2-rej-icmp6.pcap | grep \
 "IP6 aef0::3 > aef0::4: ICMP6, destination unreachable, unreachable port, \
 aef0::3 udp port 90" | uniq | wc -l)
-    test $c -eq 1
+    test $c -ge 1
 ])
 kill $(pidof tcpdump)
 
@@ -5385,7 +5385,7 @@
     ip netns exec sw0-p1-rej nc -u 10.0.0.4 90 < foo
     c=$(cat sw0-p1-rej-icmp.pcap | grep \
 "10.0.0.4 > 10.0.0.3: ICMP 10.0.0.4 udp port 90 unreachable" | uniq | wc -l)
-    test $c -eq 1
+    test $c -ge 1
 ])
 
 kill $(pidof tcpdump)
@@ -5398,7 +5398,7 @@
     ip netns exec sw0-p1-rej nc -u 10.0.0.4 94 < foo
     c=$(cat sw0-p1-rej-icmp.pcap | grep \
 "10.0.0.4 > 10.0.0.3: ICMP 10.0.0.4 udp port 94 unreachable" | uniq | wc -l)
-    test $c -eq 1
+    test $c -ge 1
 ])
 
 kill $(pidof tcpdump)
@@ -5412,7 +5412,7 @@
     c=$(cat sw0-p2-rej-icmp6.pcap | grep \
 "IP6 aef0::3 > aef0::4: ICMP6, destination unreachable, unreachable port, \
 aef0::3 udp port 90" | uniq | wc -l)
-    test $c -eq 1
+    test $c -ge 1
 ])
 
 kill $(pidof tcpdump)
@@ -5426,7 +5426,7 @@
     c=$(cat sw0-p2-rej-icmp6.pcap | grep \
 "IP6 aef0::3 > aef0::4: ICMP6, destination unreachable, unreachable port, \
 aef0::3 udp port 94" | uniq | wc -l)
-    test $c -eq 1
+    test $c -ge 1
 ])
 
 # Delete all the ACLs of pg0 and add the ACL with a generic match with reject action.
@@ -5452,7 +5452,7 @@
     ip netns exec sw0-p1-rej nc -u 10.0.0.4 90 < foo
     c=$(cat sw0-p1-rej-icmp.pcap | grep \
 "10.0.0.4 > 10.0.0.3: ICMP 10.0.0.4 udp port 90 unreachable" | uniq | wc -l)
-    test $c -eq 1
+    test $c -ge 1
 ])
 
 kill $(pidof tcpdump)
@@ -5465,7 +5465,7 @@
     c=$(cat sw0-p2-rej-icmp6.pcap | grep \
 "IP6 aef0::3 > aef0::4: ICMP6, destination unreachable, unreachable port, \
 aef0::3 udp port 90" | uniq | wc -l)
-    test $c -eq 1
+    test $c -ge 1
 ])
 
 kill $(pidof tcpdump)
@@ -9612,6 +9612,7 @@
 
 OVN_FOR_EACH_NORTHD([
 AT_SETUP([load-balancer template IPv4])
+AT_SKIP_IF([test $HAVE_TCPDUMP = no])
 AT_SKIP_IF([test $HAVE_NC = no])
 AT_KEYWORDS([ovnlb templates])
 
@@ -9638,8 +9639,8 @@
 #         |
 # VM2 ----+
 #
-# A templated load balancer applied on LS1 and GW-Router with
-# VM1 as backend.  The VIP should be accessible from both VM2 and VM3.
+# Two templated load balancer applied on LS1 and GW-Router with
+# VM1 as backend.  The VIPs should be accessible from both VM2 and VM3.
 
 check ovn-nbctl                                                   \
     -- lr-add rtr                                                 \
@@ -9660,15 +9661,22 @@
     -- lsp-set-options ls2-rtr router-port=rtr-ls2                \
     -- lsp-add ls2 vm3 -- lsp-set-addresses vm3 00:00:00:00:00:03
 
-# Add a template LB that eventually expands to:
+# Add a TCP template LB that eventually expands to:
 # VIP=66.66.66.66:666 backends=42.42.42.2:4242 proto=tcp
+# And a UDP template LB that eventually expands to:
+# VIP=66.66.66.66:777 backends=42.42.42.2:4343 proto=udp
 
-AT_CHECK([ovn-nbctl -- create chassis_template_var chassis="hv1" variables="{vip=66.66.66.66,vport=666,backends=\"42.42.42.2:4242\"}"],
+AT_CHECK([ovn-nbctl -- create chassis_template_var chassis="hv1" \
+    variables="{vip=66.66.66.66,vport1=666,backends1=\"42.42.42.2:4242\",vport2=777,backends2=\"42.42.42.2:4343\"}"],
          [0], [ignore])
 
-check ovn-nbctl --template lb-add lb-test "^vip:^vport" "^backends" tcp \
-    -- ls-lb-add ls1 lb-test                                            \
-    -- lr-lb-add rtr lb-test
+check ovn-nbctl --template lb-add lb-test-tcp "^vip:^vport1" "^backends1" tcp \
+    -- ls-lb-add ls1 lb-test-tcp                                              \
+    -- lr-lb-add rtr lb-test-tcp
+
+check ovn-nbctl --template lb-add lb-test-udp "^vip:^vport2" "^backends2" udp \
+    -- ls-lb-add ls1 lb-test-udp                                              \
+    -- lr-lb-add rtr lb-test-udp
 
 ADD_NAMESPACES(vm1)
 ADD_VETH(vm1, vm1, br-int, "42.42.42.2/24", "00:00:00:00:00:01", "42.42.42.1")
@@ -9685,24 +9693,40 @@
 
 AT_CHECK([ovn-appctl -t ovn-controller debug/dump-local-template-vars | sort], [0], [dnl
 Local template vars:
-name: 'backends' value: '42.42.42.2:4242'
+name: 'backends1' value: '42.42.42.2:4242'
+name: 'backends2' value: '42.42.42.2:4343'
 name: 'vip' value: '66.66.66.66'
-name: 'vport' value: '666'
+name: 'vport1' value: '666'
+name: 'vport2' value: '777'
 ])
 
 # Start IPv4 TCP server on vm1.
 NETNS_DAEMONIZE([vm1], [nc -k -l 42.42.42.2 4242], [nc-vm1.pid])
 
+NETNS_DAEMONIZE([vm1],
+    [tcpdump -n -i vm1 -nnleX -c3 udp and dst 42.42.42.2 and dst port 4343 > vm1.pcap 2>/dev/null],
+    [tcpdump1.pid])
+
 # Make sure connecting to the VIP works (hairpin, via ls and via lr).
 NS_CHECK_EXEC([vm1], [nc 66.66.66.66 666 -z], [0], [ignore], [ignore])
 NS_CHECK_EXEC([vm2], [nc 66.66.66.66 666 -z], [0], [ignore], [ignore])
 NS_CHECK_EXEC([vm3], [nc 66.66.66.66 666 -z], [0], [ignore], [ignore])
 
+NS_CHECK_EXEC([vm1], [echo a | nc -u 66.66.66.66 777 &], [0])
+NS_CHECK_EXEC([vm2], [echo a | nc -u 66.66.66.66 777 &], [0])
+NS_CHECK_EXEC([vm3], [echo a | nc -u 66.66.66.66 777 &], [0])
+
+OVS_WAIT_UNTIL([
+    requests=`grep "UDP" -c vm1.pcap`
+    test "${requests}" -ge "3"
+])
+
 AT_CLEANUP
 ])
 
 OVN_FOR_EACH_NORTHD([
 AT_SETUP([load-balancer template IPv6])
+AT_SKIP_IF([test $HAVE_TCPDUMP = no])
 AT_SKIP_IF([test $HAVE_NC = no])
 AT_KEYWORDS([ovnlb templates])
 
@@ -9729,8 +9753,8 @@
 #         |
 # VM2 ----+
 #
-# A templated load balancer applied on LS1 and GW-Router with
-# VM1 as backend.  The VIP should be accessible from both VM2 and VM3.
+# Two templated load balancer applied on LS1 and GW-Router with
+# VM1 as backend.  The VIPs should be accessible from both VM2 and VM3.
 
 check ovn-nbctl                                                   \
     -- lr-add rtr                                                 \
@@ -9752,14 +9776,21 @@
     -- lsp-add ls2 vm3 -- lsp-set-addresses vm3 00:00:00:00:00:03
 
 # Add a template LB that eventually expands to:
-# VIP=6666::1 backends=[4242::2]:4242 proto=tcp
+# VIP=[6666::1]:666 backends=[4242::2]:4242 proto=tcp
+# Add a template LB that eventually expands to:
+# VIP=[6666::1]:777 backends=[4242::2]:4343 proto=udp
 
-AT_CHECK([ovn-nbctl -- create chassis_template_var chassis="hv1" variables="{vip=\"6666::1\",vport=666,backends=\"[[4242::2]]:4242\"}"],
+AT_CHECK([ovn-nbctl -- create chassis_template_var chassis="hv1" \
+    variables="{vip=\"6666::1\",vport1=666,backends1=\"[[4242::2]]:4242\",vport2=777,backends2=\"[[4242::2]]:4343\"}"],
          [0], [ignore])
 
-check ovn-nbctl --template lb-add lb-test "^vip:^vport" "^backends" tcp ipv6 \
-    -- ls-lb-add ls1 lb-test                                                 \
-    -- lr-lb-add rtr lb-test
+check ovn-nbctl --template lb-add lb-test-tcp "^vip:^vport1" "^backends1" tcp ipv6 \
+    -- ls-lb-add ls1 lb-test-tcp                                                   \
+    -- lr-lb-add rtr lb-test-tcp
+
+check ovn-nbctl --template lb-add lb-test-udp "^vip:^vport2" "^backends2" udp ipv6 \
+    -- ls-lb-add ls1 lb-test-udp                                                   \
+    -- lr-lb-add rtr lb-test-udp
 
 ADD_NAMESPACES(vm1)
 ADD_VETH(vm1, vm1, br-int, "4242::2/64", "00:00:00:00:00:01", "4242::1")
@@ -9779,19 +9810,34 @@
 
 AT_CHECK([ovn-appctl -t ovn-controller debug/dump-local-template-vars | sort], [0], [dnl
 Local template vars:
-name: 'backends' value: '[[4242::2]]:4242'
+name: 'backends1' value: '[[4242::2]]:4242'
+name: 'backends2' value: '[[4242::2]]:4343'
 name: 'vip' value: '6666::1'
-name: 'vport' value: '666'
+name: 'vport1' value: '666'
+name: 'vport2' value: '777'
 ])
 
 # Start IPv6 TCP server on vm1.
 NETNS_DAEMONIZE([vm1], [nc -k -l 4242::2 4242], [nc-vm1.pid])
 
+NETNS_DAEMONIZE([vm1],
+    [tcpdump -n -i vm1 -nnleX -c3 udp and dst 4242::2 and dst port 4343 > vm1.pcap 2>/dev/null],
+    [tcpdump1.pid])
+
 # Make sure connecting to the VIP works (hairpin, via ls and via lr).
 NS_CHECK_EXEC([vm1], [nc 6666::1 666 -z], [0], [ignore], [ignore])
 NS_CHECK_EXEC([vm2], [nc 6666::1 666 -z], [0], [ignore], [ignore])
 NS_CHECK_EXEC([vm3], [nc 6666::1 666 -z], [0], [ignore], [ignore])
 
+NS_CHECK_EXEC([vm1], [echo a | nc -u 6666::1 777 &], [0])
+NS_CHECK_EXEC([vm2], [echo a | nc -u 6666::1 777 &], [0])
+NS_CHECK_EXEC([vm3], [echo a | nc -u 6666::1 777 &], [0])
+
+OVS_WAIT_UNTIL([
+    requests=`grep "UDP" -c vm1.pcap`
+    test "${requests}" -ge "3"
+])
+
 AT_CLEANUP
 ])
 
diff -Nru ovn-23.03.0~git20230221.038cfb1/utilities/ovn-dbctl.c ovn-23.03.0/utilities/ovn-dbctl.c
--- ovn-23.03.0~git20230221.038cfb1/utilities/ovn-dbctl.c	2023-02-17 21:40:31.000000000 +0000
+++ ovn-23.03.0/utilities/ovn-dbctl.c	2023-03-03 18:37:48.000000000 +0000
@@ -109,6 +109,15 @@
                         struct ovsdb_idl *idl, int argc, char *argv[]);
 static void ovn_dbctl_exit(int status);
 
+static void
+destroy_argv(int argc, char **argv)
+{
+    for (int i = 0; i < argc; i++) {
+        free(argv[i]);
+    }
+    free(argv);
+}
+
 int
 ovn_dbctl_main(int argc, char *argv[],
                const struct ovn_dbctl_options *dbctl_options)
@@ -151,6 +160,7 @@
     char *error_s = ovs_cmdl_parse_all(argc, argv_, get_all_options(),
                                        &parsed_options, &n_parsed_options);
     if (error_s) {
+        destroy_argv(argc, argv_);
         ctl_fatal("%s", error_s);
     }
 
@@ -179,6 +189,7 @@
     bool daemon_mode = false;
     if (get_detach()) {
         if (argc != optind) {
+            destroy_argv(argc, argv_);
             ctl_fatal("non-option arguments not supported with --detach "
                       "(use --help for help)");
         }
@@ -206,11 +217,8 @@
         if (error) {
             ovsdb_idl_destroy(idl);
             idl = the_idl = NULL;
+            destroy_argv(argc, argv_);
 
-            for (int i = 0; i < argc; i++) {
-                free(argv_[i]);
-            }
-            free(argv_);
             ctl_fatal("%s", error);
         }
 
@@ -239,21 +247,15 @@
         }
         free(commands);
         if (error) {
-            for (int i = 0; i < argc; i++) {
-                free(argv_[i]);
-            }
-            free(argv_);
+            destroy_argv(argc, argv_);
             ctl_fatal("%s", error);
         }
     }
 
     ovsdb_idl_destroy(idl);
     idl = the_idl = NULL;
+    destroy_argv(argc, argv_);
 
-    for (int i = 0; i < argc; i++) {
-        free(argv_[i]);
-    }
-    free(argv_);
     exit(EXIT_SUCCESS);
 }
 
@@ -1240,40 +1242,53 @@
 
     ctl_timeout_setup(timeout);
 
+    char *cmd_result = NULL;
+    char *cmd_error = NULL;
     struct jsonrpc *client;
+    int exit_status;
+    char *error_str;
+
     int error = unixctl_client_create(socket_name, &client);
     if (error) {
-        ctl_fatal("%s: could not connect to %s daemon (%s); "
-                  "unset %s to avoid using daemon",
-                  socket_name, program_name, ovs_strerror(error),
-                  dbctl_options->daemon_env_var_name);
+        error_str = xasprintf("%s: could not connect to %s daemon (%s); "
+                              "unset %s to avoid using daemon",
+                              socket_name, program_name, ovs_strerror(error),
+                              dbctl_options->daemon_env_var_name);
+        goto log_error;
     }
 
-    char *cmd_result;
-    char *cmd_error;
     error = unixctl_client_transact(client, "run",
                                     args.n, args.names,
                                     &cmd_result, &cmd_error);
     if (error) {
-        ctl_fatal("%s: transaction error (%s)",
-                  socket_name, ovs_strerror(error));
+        error_str = xasprintf("%s: transaction error (%s)",
+                              socket_name, ovs_strerror(error));
+        goto log_error;
     }
-    svec_destroy(&args);
 
-    int exit_status;
     if (cmd_error) {
-        exit_status = EXIT_FAILURE;
         fprintf(stderr, "%s: %s", program_name, cmd_error);
-    } else {
-        exit_status = EXIT_SUCCESS;
-        fputs(cmd_result, stdout);
+        goto error;
     }
+
+    exit_status = EXIT_SUCCESS;
+    fputs(cmd_result, stdout);
+    goto cleanup;
+
+log_error:
+    VLOG_ERR("%s", error_str);
+    ovs_error(0, "%s", error_str);
+    free(error_str);
+
+error:
+    exit_status = EXIT_FAILURE;
+
+cleanup:
     free(cmd_result);
     free(cmd_error);
     jsonrpc_close(client);
-    for (int i = 0; i < argc; i++) {
-        free(argv[i]);
-    }
-    free(argv);
+    svec_destroy(&args);
+    destroy_argv(argc, argv);
+
     exit(exit_status);
 }
diff -Nru ovn-23.03.0~git20230221.038cfb1/utilities/ovn-nbctl.c ovn-23.03.0/utilities/ovn-nbctl.c
--- ovn-23.03.0~git20230221.038cfb1/utilities/ovn-nbctl.c	2023-02-17 21:40:31.000000000 +0000
+++ ovn-23.03.0/utilities/ovn-nbctl.c	2023-03-03 18:37:48.000000000 +0000
@@ -4553,7 +4553,7 @@
     }
 
 cleanup:
-    if (next_hop && strlen(next_hop)) {
+    if (next_hop && next_hop[0]) {
         free(next_hop);
     }
     free(prefix);
@@ -6590,12 +6590,12 @@
 
     if (!strcmp(route->nexthop, "discard")) {
         next_hop = xasprintf("discard");
-    } else if (strlen(route->nexthop)) {
+    } else if (route->nexthop[0]) {
         next_hop = normalize_prefix_str(route->nexthop);
     }
     ds_put_format(s, "%25s %25s", prefix, next_hop);
     free(prefix);
-    if (strlen(next_hop)) {
+    if (next_hop[0]) {
         free(next_hop);
     }
 
@@ -6734,8 +6734,8 @@
         if (!i || (i > 0 && strcmp(route->route_table,
                                    ipv4_routes[i - 1].route->route_table))) {
             ds_put_format(&ctx->output, "%sRoute Table %s:\n", i ? "\n" : "",
-                          strlen(route->route_table) ? route->route_table
-                                                     : "<main>");
+                          route->route_table[0] ? route->route_table
+                                                : "<main>");
         }
 
         print_route(ipv4_routes[i].route, &ctx->output, ecmp);
@@ -6760,8 +6760,8 @@
         if (!i || (i > 0 && strcmp(route->route_table,
                                    ipv6_routes[i - 1].route->route_table))) {
             ds_put_format(&ctx->output, "%sRoute Table %s:\n", i ? "\n" : "",
-                          strlen(route->route_table) ? route->route_table
-                                                     : "<main>");
+                          route->route_table[0] ? route->route_table
+                                                : "<main>");
         }
 
         print_route(ipv6_routes[i].route, &ctx->output, ecmp);

Attachment: signature.asc
Description: This is a digitally signed message part


--- End Message ---
--- Begin Message ---
Unblocked.

--- End Message ---

Reply to: