diff --git a/orchagent/routeorch.cpp b/orchagent/routeorch.cpp index 803f71bb1e..070054c7af 100644 --- a/orchagent/routeorch.cpp +++ b/orchagent/routeorch.cpp @@ -2639,6 +2639,51 @@ bool RouteOrch::removeRoutePost(const RouteBulkContext& ctx) return true; } +bool RouteOrch::hasBgpRoute(const IpPrefix& prefix) +{ + SWSS_LOG_ENTER(); + + sai_object_id_t& vrf_id = gVirtualRouterId; + + sai_route_entry_t route_entry; + route_entry.vr_id = vrf_id; + route_entry.switch_id = gSwitchId; + copy(route_entry.destination, prefix); + auto it_route_table = m_syncdRoutes.find(vrf_id); + if (it_route_table == m_syncdRoutes.end()) + { + SWSS_LOG_INFO("Failed to find route table, vrf_id 0x%" PRIx64 "\n", vrf_id); + return true; + } + auto it_route = it_route_table->second.find(prefix); + size_t creating = gRouteBulker.creating_entries_count(route_entry); + if (it_route == it_route_table->second.end() && creating == 0) + { + SWSS_LOG_INFO("No Route exists for vrf_id 0x%" PRIx64 ", prefix %s\n", vrf_id, + prefix.to_string().c_str()); + return false; + } + return true; +} + +bool RouteOrch::removeRoutePrefix(const IpPrefix& prefix) +{ + // This function removes the route if it exists. 
+ + string key = "ROUTE_TABLE:" + prefix.to_string(); + RouteBulkContext context(key, false); + context.ip_prefix = prefix; + context.vrf_id = gVirtualRouterId; + if (removeRoute(context)) + { + SWSS_LOG_INFO("Could not find the route with prefix %s", prefix.to_string().c_str()); + return true; + } + gRouteBulker.flush(); + return removeRoutePost(context); + +} + bool RouteOrch::createRemoteVtep(sai_object_id_t vrf_id, const NextHopKey &nextHop) { SWSS_LOG_ENTER(); diff --git a/orchagent/routeorch.h b/orchagent/routeorch.h index 577d966a26..a8c450a2de 100644 --- a/orchagent/routeorch.h +++ b/orchagent/routeorch.h @@ -215,6 +215,8 @@ class RouteOrch : public Orch, public Subject const NextHopGroupKey getSyncdRouteNhgKey(sai_object_id_t vrf_id, const IpPrefix& ipPrefix); bool createFineGrainedNextHopGroup(sai_object_id_t &next_hop_group_id, vector &nhg_attrs); bool removeFineGrainedNextHopGroup(sai_object_id_t &next_hop_group_id); + bool hasBgpRoute(const IpPrefix& prefix); + bool removeRoutePrefix(const IpPrefix& prefix); void addLinkLocalRouteToMe(sai_object_id_t vrf_id, IpPrefix linklocal_prefix); void delLinkLocalRouteToMe(sai_object_id_t vrf_id, IpPrefix linklocal_prefix); diff --git a/orchagent/vnetorch.cpp b/orchagent/vnetorch.cpp index 5c482d726d..f148cf7784 100644 --- a/orchagent/vnetorch.cpp +++ b/orchagent/vnetorch.cpp @@ -913,7 +913,7 @@ bool VNetRouteOrch::createNextHopGroup(const string& vnet, VNetVrfObject *vrf_obj, const string& monitoring) { - + SWSS_LOG_INFO("Creating nexthop group from nexthops(%s)\n", nexthops.to_string().c_str()); if (nexthops.getSize() == 0) { return true; @@ -926,6 +926,7 @@ bool VNetRouteOrch::createNextHopGroup(const string& vnet, next_hop_group_entry.ref_count = 0; if (monitoring == "custom" || nexthop_info_[vnet].find(nexthop.ip_address) == nexthop_info_[vnet].end() || nexthop_info_[vnet][nexthop.ip_address].bfd_state == SAI_BFD_SESSION_STATE_UP) { + SWSS_LOG_INFO("Adding nexthop: %s to the active group", 
nexthop.ip_address.to_string().c_str()); next_hop_group_entry.active_members[nexthop] = SAI_NULL_OBJECT_ID; } syncd_nexthop_groups_[vnet][nexthops] = next_hop_group_entry; @@ -989,6 +990,7 @@ bool VNetRouteOrch::selectNextHopGroup(const string& vnet, // This is followed by an attempt to create a NHG which can be subset of nexthops_primary // depending on the endpoint monitor state. If no NHG from primary is created, we attempt // the same for secondary. + if(nexthops_secondary.getSize() != 0 && monitoring == "custom") { auto it_route = syncd_tunnel_routes_[vnet].find(ipPrefix); @@ -1147,6 +1149,22 @@ bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipP } else { + auto prefixToRemove = ipPrefix; + if (adv_prefix.to_string() != ipPrefix.to_string()) + { + prefixToRemove = adv_prefix; + } + auto prefixSubnet = prefixToRemove.getSubnet(); + if(gRouteOrch && gRouteOrch->hasBgpRoute(prefixSubnet)) + { + if (!gRouteOrch->removeRoutePrefix(prefixSubnet)) + { + SWSS_LOG_ERROR("Could not remove existing bgp route for prefix: %s\n", prefixSubnet.to_string().c_str()); + return false; + } + SWSS_LOG_INFO("Successfully removed existing bgp route for prefix: %s\n", + prefixSubnet.to_string().c_str()); + } if (it_route == syncd_tunnel_routes_[vnet].end()) { route_status = add_route(vr_id, pfx, nh_id); @@ -1295,6 +1313,8 @@ bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipP SWSS_LOG_ERROR("Route del failed for %s, vr_id '0x%" PRIx64, ipPrefix.to_string().c_str(), vr_id); return false; } + SWSS_LOG_INFO("Successfully deleted the route for prefix: %s", ipPrefix.to_string().c_str()); + } } @@ -1880,6 +1900,7 @@ void VNetRouteOrch::removeBfdSession(const string& vnet, const NextHopKey& endpo { SWSS_LOG_ERROR("BFD session for endpoint %s does not exist", endpoint_addr.to_string().c_str()); } + SWSS_LOG_INFO("Removing nexthop info for endpoint: %s\n", endpoint_addr.to_string().c_str()); nexthop_info_[vnet].erase(endpoint_addr); string key = "default:default:" 
+ monitor_addr.to_string(); @@ -2105,14 +2126,19 @@ void VNetRouteOrch::postRouteState(const string& vnet, IpPrefix& ipPrefix, NextH auto prefix_to_use = ipPrefix; if (prefix_to_adv_prefix_.find(ipPrefix) != prefix_to_adv_prefix_.end()) { - route_state = ""; auto adv_pfx = prefix_to_adv_prefix_[ipPrefix]; - if (adv_prefix_refcount_[adv_pfx] == 1) + if (route_state == "active" and adv_prefix_refcount_[adv_pfx] == 1) { - route_state = "active"; prefix_to_use = adv_pfx; } - } + else + { + route_state = ""; + } + } + SWSS_LOG_NOTICE("Advertisement of prefix: %s with profile: %s, status: %s via prefix: %s\n", + ipPrefix.to_string().c_str(), profile.c_str(), + route_state.c_str(), prefix_to_use.to_string().c_str()); if (vnet_orch_->getAdvertisePrefix(vnet)) { if (route_state == "active") @@ -2138,6 +2164,7 @@ void VNetRouteOrch::removeRouteState(const string& vnet, IpPrefix& ipPrefix) { const string state_db_key = vnet + state_db_key_delimiter + ipPrefix.to_string(); state_vnet_rt_tunnel_table_->del(state_db_key); + SWSS_LOG_NOTICE("Advertisement of prefix: %s stopped.\n", ipPrefix.to_string().c_str()); if(prefix_to_adv_prefix_.find(ipPrefix) !=prefix_to_adv_prefix_.end()) { @@ -2270,7 +2297,12 @@ void VNetRouteOrch::updateVnetTunnel(const BfdUpdate& update) { continue; } + // when we add the first nexthop to the route, we dont create a nexthop group, we call the updateTunnelRoute with NHG with one member. + // when adding the 2nd, 3rd ... members we create each NH using this create_next_hop_group_member call but give it the reference of next_hop_group_id. + // this way we dont have to update the route, the syncd does it by itself. we only call the updateTunnelRoute to add/remove when adding or removing the + // route fully. 
+ bool failed = false; if (state == SAI_BFD_SESSION_STATE_UP) { sai_object_id_t next_hop_group_member_id = SAI_NULL_OBJECT_ID; @@ -2322,10 +2354,47 @@ void VNetRouteOrch::updateVnetTunnel(const BfdUpdate& update) { for (auto ip_pfx : syncd_nexthop_groups_[vnet][nexthops].tunnel_routes) { + // remove the bgp learnt route first if any exists and then add the tunnel route. + auto ipPrefixsubnet = ip_pfx.getSubnet(); + auto prefixStr = ip_pfx.to_string(); + auto nhStr = nexthops.to_string(); + if (prefix_to_adv_prefix_.find(ip_pfx) != prefix_to_adv_prefix_.end()) + { + auto adv_prefix = prefix_to_adv_prefix_[ip_pfx]; + if(adv_prefix.to_string() != prefixStr) + { + ipPrefixsubnet = adv_prefix.getSubnet(); + } + } + if(gRouteOrch && gRouteOrch->hasBgpRoute(ipPrefixsubnet)) + { + if (!gRouteOrch->removeRoutePrefix(ipPrefixsubnet)) + { + SWSS_LOG_ERROR("Could not remove existing bgp route for prefix: %s\n", prefixStr.c_str()); + return; + } + SWSS_LOG_INFO("Successfully removed existing bgp route for prefix: %s\n", prefixStr.c_str()); + } string op = SET_COMMAND; - updateTunnelRoute(vnet, ip_pfx, nexthops, op); + SWSS_LOG_INFO("Adding Vnet route for prefix:%s with nexthop group: %s\n", prefixStr.c_str(), nhStr.c_str()); + + if (!updateTunnelRoute(vnet, ip_pfx, nexthops, op)) + { + SWSS_LOG_NOTICE("Failed to create tunnel route in hardware for prefix: %s\n", prefixStr.c_str()); + failed = true; + } + else + { + SWSS_LOG_INFO("Successfully created tunnel route in hardware for prefix: %s\n", prefixStr.c_str()); + } } } + if (failed) + { + // This is an unrecoverable error, Throw a LOG_ERROR and return + SWSS_LOG_ERROR("Inconsistent hardware State. 
Failed to create tunnel routes.\n"); + return; + } } else { @@ -2351,6 +2420,7 @@ void VNetRouteOrch::updateVnetTunnel(const BfdUpdate& update) } vrf_obj->removeTunnelNextHop(endpoint); + SWSS_LOG_INFO("Successfully removed nexthop: %s\n",endpoint.to_string().c_str() ); gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_NEXTHOP_GROUP_MEMBER); } @@ -2366,6 +2436,7 @@ void VNetRouteOrch::updateVnetTunnel(const BfdUpdate& update) { for (auto ip_pfx : syncd_nexthop_groups_[vnet][nexthops].tunnel_routes) { + SWSS_LOG_NOTICE("Removing Vnet route for prefix : %s due to no active nexthops.\n",ip_pfx.to_string().c_str()); string op = DEL_COMMAND; updateTunnelRoute(vnet, ip_pfx, nexthops, op); } @@ -2373,12 +2444,14 @@ void VNetRouteOrch::updateVnetTunnel(const BfdUpdate& update) } } } - - // Post configured in State DB - for (auto ip_pfx : syncd_nexthop_groups_[vnet][nexthops].tunnel_routes) + if (!failed) { - string profile = vrf_obj->getProfile(ip_pfx); - postRouteState(vnet, ip_pfx, nexthops, profile); + // Post configured in State DB + for (auto ip_pfx : syncd_nexthop_groups_[vnet][nexthops].tunnel_routes) + { + string profile = vrf_obj->getProfile(ip_pfx); + postRouteState(vnet, ip_pfx, nexthops, profile); + } } } } @@ -2439,6 +2512,8 @@ void VNetRouteOrch::updateVnetTunnelCustomMonitor(const MonitorUpdate& update) copy(pfx, prefix); NextHopGroupKey nhg_custom_primary = getActiveNHSet( vnet, primary, prefix); NextHopGroupKey nhg_custom_secondary = getActiveNHSet( vnet, secondary, prefix); + SWSS_LOG_INFO("Primary active(%s), Secondary active (%s), Current active(%s)\n", nhg_custom_primary.to_string().c_str(), + nhg_custom_secondary.to_string().c_str(), active_nhg.to_string().c_str()); if (nhg_custom_primary.getSize() > 0) { if (nhg_custom_primary != active_nhg ) @@ -2486,7 +2561,7 @@ void VNetRouteOrch::updateVnetTunnelCustomMonitor(const MonitorUpdate& update) if (nhg_custom.getSize() == 0) { // nhg_custom is empty. we shall create a dummy empty NHG for book keeping. 
- SWSS_LOG_INFO(" Neither Primary or Secondary endpoints are up."); + SWSS_LOG_INFO(" Neither Primary or Secondary endpoints are up.\n"); if (!hasNextHopGroup(vnet, nhg_custom)) { NextHopGroupInfo next_hop_group_entry; @@ -2504,6 +2579,7 @@ void VNetRouteOrch::updateVnetTunnelCustomMonitor(const MonitorUpdate& update) { if (active_nhg_size > 0) { + SWSS_LOG_INFO(" Removing the route for prefix: %s.",prefix.to_string().c_str()); // we need to remove the route del_route(vr_id, pfx); } @@ -2516,11 +2592,33 @@ void VNetRouteOrch::updateVnetTunnelCustomMonitor(const MonitorUpdate& update) if (active_nhg_size > 0) { // we need to replace the nhg in the route + SWSS_LOG_INFO("Replacing nexthop group for prefix: %s, nexthop group: %s\n", + prefix.to_string().c_str(), nhg_custom.to_string().c_str()); route_status = update_route(vr_id, pfx, nh_id); } else { // we need to readd the route. + SWSS_LOG_NOTICE("Adding Custom monitored Route with prefix: %s and nexthop group: %s\n", + prefix.to_string().c_str(), nhg_custom.to_string().c_str()); + auto prefixToUse = prefix; + if (prefix_to_adv_prefix_.find(prefix) != prefix_to_adv_prefix_.end()) + { + auto adv_prefix = prefix_to_adv_prefix_[prefix]; + if(adv_prefix.to_string() != prefix.to_string()) + { + prefixToUse = adv_prefix; + } + } + auto prefixsubnet = prefixToUse.getSubnet(); + if (gRouteOrch && gRouteOrch->hasBgpRoute(prefixsubnet)) + { + if (!gRouteOrch->removeRoutePrefix(prefixsubnet)) + { + SWSS_LOG_ERROR("Could not remove existing bgp route for prefix: %s\n", prefix.to_string().c_str()); + } + SWSS_LOG_INFO("Successfully removed existing bgp route for prefix: %s\n", prefix.to_string().c_str()); + } route_status = add_route(vr_id, pfx, nh_id); } if (!route_status) @@ -2565,8 +2663,10 @@ void VNetRouteOrch::updateVnetTunnelCustomMonitor(const MonitorUpdate& update) } else { + SWSS_LOG_INFO("Prefix %s no longer references nexthop group: %s\n",prefix.to_string().c_str(), active_nhg.to_string().c_str()); 
syncd_nexthop_groups_[vnet][active_nhg].tunnel_routes.erase(prefix); } + SWSS_LOG_INFO("Prefix %s now references nexthop group: %s\n",prefix.to_string().c_str(), nhg_custom.to_string().c_str()); syncd_nexthop_groups_[vnet][nhg_custom].tunnel_routes.insert(prefix); syncd_tunnel_routes_[vnet][prefix].nhg_key = nhg_custom; if (nhg_custom != active_nhg) @@ -2576,6 +2676,7 @@ void VNetRouteOrch::updateVnetTunnelCustomMonitor(const MonitorUpdate& update) if (nhg_custom.getSize() == 0 && active_nhg_size > 0) { vrf_obj->removeRoute(prefix); + SWSS_LOG_NOTICE("Route prefix is no longer active: %s\n", prefix.to_string().c_str()); removeRouteState(vnet, prefix); if (prefix_to_adv_prefix_.find(prefix) != prefix_to_adv_prefix_.end()) { @@ -2589,12 +2690,15 @@ void VNetRouteOrch::updateVnetTunnelCustomMonitor(const MonitorUpdate& update) } else if (nhg_custom.getSize() > 0 && active_nhg_size == 0) { - auto adv_prefix = prefix_to_adv_prefix_[prefix]; - if (adv_prefix_refcount_.find(adv_prefix) == adv_prefix_refcount_.end()) + if (prefix_to_adv_prefix_.find(prefix) != prefix_to_adv_prefix_.end()) { - adv_prefix_refcount_[adv_prefix] = 0; + auto adv_prefix = prefix_to_adv_prefix_[prefix]; + if (adv_prefix_refcount_.find(adv_prefix) == adv_prefix_refcount_.end()) + { + adv_prefix_refcount_[adv_prefix] = 0; + } + adv_prefix_refcount_[adv_prefix] += 1; } - adv_prefix_refcount_[adv_prefix] += 1; string profile = vrf_obj->getProfile(prefix); postRouteState(vnet, prefix, nhg_custom, profile); } diff --git a/tests/test_vnet2.py b/tests/test_vnet2.py new file mode 100644 index 0000000000..9ed200f199 --- /dev/null +++ b/tests/test_vnet2.py @@ -0,0 +1,426 @@ +import time +import ipaddress +import json +import random +import time +import pytest + +from swsscommon import swsscommon +from pprint import pprint +from dvslib.dvs_common import wait_for_result +from vnet_lib import * + + +class TestVnet2Orch(object): + CFG_SUBNET_DECAP_TABLE_NAME = "SUBNET_DECAP" + + @pytest.fixture + def 
setup_subnet_decap(self, dvs): + + def _apply_subnet_decap_config(subnet_decap_config): + """Apply subnet decap config to CONFIG_DB.""" + subnet_decap_tbl = swsscommon.Table(configdb, self.CFG_SUBNET_DECAP_TABLE_NAME) + fvs = create_fvs(**subnet_decap_config) + subnet_decap_tbl.set("AZURE", fvs) + + def _cleanup_subnet_decap_config(): + """Cleanup subnet decap config in CONFIG_DB.""" + subnet_decap_tbl = swsscommon.Table(configdb, self.CFG_SUBNET_DECAP_TABLE_NAME) + for key in subnet_decap_tbl.getKeys(): + subnet_decap_tbl._del(key) + + configdb = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + _cleanup_subnet_decap_config() + + yield _apply_subnet_decap_config + + _cleanup_subnet_decap_config() + + def get_vnet_obj(self): + return VnetVxlanVrfTunnel() + + def setup_db(self, dvs): + self.pdb = dvs.get_app_db() + self.adb = dvs.get_asic_db() + self.cdb = dvs.get_config_db() + self.sdb = dvs.get_state_db() + + def clear_srv_config(self, dvs): + dvs.servers[0].runcmd("ip address flush dev eth0") + dvs.servers[1].runcmd("ip address flush dev eth0") + dvs.servers[2].runcmd("ip address flush dev eth0") + dvs.servers[3].runcmd("ip address flush dev eth0") + + def set_admin_status(self, interface, status): + self.cdb.update_entry("PORT", interface, {"admin_status": status}) + + def create_l3_intf(self, interface, vrf_name): + if len(vrf_name) == 0: + self.cdb.create_entry("INTERFACE", interface, {"NULL": "NULL"}) + else: + self.cdb.create_entry("INTERFACE", interface, {"vrf_name": vrf_name}) + + def add_ip_address(self, interface, ip): + self.cdb.create_entry("INTERFACE", interface + "|" + ip, {"NULL": "NULL"}) + + def remove_ip_address(self, interface, ip): + self.cdb.delete_entry("INTERFACE", interface + "|" + ip) + + def check_route_entries(self, destinations, absent=False): + def _access_function(): + route_entries = self.adb.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY") + route_destinations = [json.loads(route_entry)["dest"] + for route_entry in 
route_entries] + return (all(destination in route_destinations for destination in destinations), None) + if absent: + return True if _access_function() == None else False + + wait_for_result(_access_function) + return True + + + ''' + Test 1 - Test for vnet tunnel routes interaction with regular route. + Add the conflicting route and then add the vnet route with same nexthops. + Bring up the bfd sessions and check the vnet route is programmed in hardware. + Remove the vnet route and check the vnet route is removed. + Remove the conflicting route and check the conflicting route is removed. + ''' + def test_vnet_orch_1(self, dvs, testlog): + vnet_obj = self.get_vnet_obj() + + tunnel_name = 'tunnel_1' + vnet_name = 'Vnet1' + self.setup_db(dvs) + vnet_obj.fetch_exist_entries(dvs) + # create l3 interface and bring it up + self.create_l3_intf("Ethernet0", "") + self.add_ip_address("Ethernet0", "20.20.20.1/24") + self.set_admin_status("Ethernet0", "down") + time.sleep(1) + self.set_admin_status("Ethernet0", "up") + + # set ip address and default route + dvs.servers[0].runcmd("ip address add 20.20.20.5/24 dev eth0") + dvs.servers[0].runcmd("ip route add default via 20.20.20.1") + + # create vxlan tunnel and verfiy it + create_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') + create_vnet_entry(dvs, vnet_name, tunnel_name, '1001', "") + vnet_obj.check_vnet_entry(dvs, vnet_name) + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '1001') + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') + + vnet_obj.fetch_exist_entries(dvs) + + # add conflicting route + dvs.runcmd("vtysh -c \"configure terminal\" -c \"ip route 103.100.1.1/32 20.20.20.5\"") + + # check ASIC route database + self.check_route_entries(["103.100.1.1/32"]) + + create_vnet_routes(dvs, "103.100.1.1/32", vnet_name, '9.0.0.1,9.0.0.2,9.0.0.3', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3') + + # default bfd status is down, route should not be programmed in this status + vnet_obj.check_del_vnet_routes(dvs, 
vnet_name, ["103.100.1.1/32"], absent=True) + check_state_db_routes(dvs, vnet_name, "103.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "103.100.1.1/32") + + # Route should be properly configured when all bfd session states go up + update_bfd_session_state(dvs, '9.1.0.2', 'Up') + time.sleep(1) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.2'], tunnel_name) + + update_bfd_session_state(dvs, '9.1.0.3', 'Up') + time.sleep(1) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.2', '9.0.0.3'], tunnel_name, route_ids=route1, nhg=nhg1_1) + + update_bfd_session_state(dvs, '9.1.0.1', 'Up') + time.sleep(1) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.1', '9.0.0.2', '9.0.0.3'], tunnel_name, route_ids=route1, nhg=nhg1_1) + check_state_db_routes(dvs, vnet_name, "103.100.1.1/32", ['9.0.0.1', '9.0.0.2', '9.0.0.3']) + + # Remove all endpoint from group route shouldnt come back up. + update_bfd_session_state(dvs, '9.1.0.2', 'Down') + update_bfd_session_state(dvs, '9.1.0.1', 'Down') + update_bfd_session_state(dvs, '9.1.0.3', 'Down') + + time.sleep(1) + # after removal of vnet route, conflicting route is not getting programmed as its not a bgp learnt route. + self.check_route_entries(["103.100.1.1/32"], absent=True) + # Remove tunnel route 1 + delete_vnet_routes(dvs, "103.100.1.1/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["103.100.1.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "103.100.1.1/32") + check_remove_routes_advertisement(dvs, "103.100.1.1/32") + + # Check the previous nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg1_1 not in vnet_obj.nhgs + + # Confirm the BFD sessions are removed + check_del_bfd_session(dvs, ['9.1.0.1', '9.1.0.2', '9.1.0.3']) + vnet_obj.nhg_ids = {} + vnet_obj.fetch_exist_entries(dvs) + # readd the same route. 
+ create_vnet_routes(dvs, "103.100.1.1/32", vnet_name, '9.0.0.1,9.0.0.2,9.0.0.3', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3') + + # default bfd status is down, route should not be programmed in this status + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["103.100.1.1/32"], absent=True) + check_state_db_routes(dvs, vnet_name, "103.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "103.100.1.1/32") + + # Route should be properly configured when all bfd session states go up + update_bfd_session_state(dvs, '9.1.0.2', 'Up') + time.sleep(1) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.2'], tunnel_name) + + update_bfd_session_state(dvs, '9.1.0.3', 'Up') + time.sleep(1) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.2', '9.0.0.3'], tunnel_name, route_ids=route1, nhg=nhg1_1) + + update_bfd_session_state(dvs, '9.1.0.1', 'Up') + time.sleep(1) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.1', '9.0.0.2', '9.0.0.3'], tunnel_name, route_ids=route1, nhg=nhg1_1) + check_state_db_routes(dvs, vnet_name, "103.100.1.1/32", ['9.0.0.1', '9.0.0.2', '9.0.0.3']) + + # Remove all endpoint from group route shouldnt come back up. + update_bfd_session_state(dvs, '9.1.0.2', 'Down') + update_bfd_session_state(dvs, '9.1.0.1', 'Down') + update_bfd_session_state(dvs, '9.1.0.3', 'Down') + + time.sleep(1) + # after removal of vnet route, conflicting route is not getting programmed as its not a bgp learnt route. 
+ self.check_route_entries(["103.100.1.1/32"], absent=True) + # Remove tunnel route 1 + delete_vnet_routes(dvs, "103.100.1.1/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["103.100.1.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "103.100.1.1/32") + check_remove_routes_advertisement(dvs, "103.100.1.1/32") + + # Check the previous nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg1_1 not in vnet_obj.nhgs + + # Confirm the BFD sessions are removed + check_del_bfd_session(dvs, ['9.1.0.1', '9.1.0.2', '9.1.0.3']) + dvs.runcmd("vtysh -c \"configure terminal\" -c \"no ip route 103.100.1.1/32\"") + delete_vnet_entry(dvs, vnet_name) + vnet_obj.check_del_vnet_entry(dvs, vnet_name) + delete_vxlan_tunnel(dvs, tunnel_name) + + ''' + Test 2 - Test for vnet tunnel routes interaction with regular route with endpoints being up. + Add the conflicting route and then add the vnet route with same nexthops. + Bring up the bfd sessions and check the vnet route is programmed in hardware. + Add the 2nd conflicting route and then add the 2nd vnet route with same nexthops as first vnet route. + This way we check if the newly added route works when the nexthops are already UP. + Verify the vnet routes are programmed in hardware. + Remove all the vnet routes and check the vnet routes are removed. + Remove all the conflicting routes and check the conflicting routes are removed.
+ ''' + def test_vnet_orch_2(self, dvs, testlog): + vnet_obj = self.get_vnet_obj() + + tunnel_name = 'tunnel_2' + vnet_name = 'Vnet2' + self.setup_db(dvs) + vnet_obj.fetch_exist_entries(dvs) + + # create l3 interface and bring it up + self.create_l3_intf("Ethernet0", "") + self.add_ip_address("Ethernet0", "20.20.20.1/24") + self.set_admin_status("Ethernet0", "down") + time.sleep(1) + self.set_admin_status("Ethernet0", "up") + + # set ip address and default route + dvs.servers[0].runcmd("ip address add 20.20.20.6/24 dev eth0") + dvs.servers[0].runcmd("ip route add default via 20.20.20.1") + + # create vxlan tunnel and verfiy it + create_vxlan_tunnel(dvs, tunnel_name, '9.8.8.9') + create_vnet_entry(dvs, vnet_name, tunnel_name, '1002', "") + vnet_obj.check_vnet_entry(dvs, vnet_name) + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '1002') + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '9.8.8.9') + vnet_obj.fetch_exist_entries(dvs) + + # add conflicting route + dvs.runcmd("vtysh -c \"configure terminal\" -c \"ip route 200.100.1.1/32 20.20.20.6\"") + + # check ASIC route database + self.check_route_entries(["200.100.1.1/32"]) + + create_vnet_routes(dvs, "200.100.1.1/32", vnet_name, '9.0.0.1,9.0.0.2,9.0.0.3', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3') + + # default bfd status is down, route should not be programmed in this status + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["200.100.1.1/32"], absent=True) + check_state_db_routes(dvs, vnet_name, "200.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "200.100.1.1/32") + + # Route should be properly configured when all bfd session states go up + update_bfd_session_state(dvs, '9.1.0.2', 'Up') + time.sleep(1) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.2'], tunnel_name) + + update_bfd_session_state(dvs, '9.1.0.3', 'Up') + time.sleep(1) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.2', '9.0.0.3'], tunnel_name, route_ids=route1, nhg=nhg1_1) + + 
update_bfd_session_state(dvs, '9.1.0.1', 'Up') + time.sleep(1) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.1', '9.0.0.2', '9.0.0.3'], tunnel_name, route_ids=route1, nhg=nhg1_1) + check_state_db_routes(dvs, vnet_name, "200.100.1.1/32", ['9.0.0.1', '9.0.0.2', '9.0.0.3']) + + # create a new regular and vnet route with same different prefix but same nexthops as before. + dvs.runcmd("vtysh -c \"configure terminal\" -c \"ip route 200.200.1.1/32 20.20.20.6\"") + # check ASIC route database + self.check_route_entries(["200.200.1.1/32"]) + + create_vnet_routes(dvs, "200.200.1.1/32", vnet_name, '9.0.0.1,9.0.0.2,9.0.0.3', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3') + dvs.runcmd("vtysh -c \"configure terminal\" -c \"no ip route 200.100.1.1/32 20.20.20.6\"") + + # Remove all endpoint from group route shouldnt come back up. + update_bfd_session_state(dvs, '9.1.0.2', 'Down') + update_bfd_session_state(dvs, '9.1.0.1', 'Down') + update_bfd_session_state(dvs, '9.1.0.3', 'Down') + + time.sleep(1) + # after removal of vnet route, conflicting route is not getting programmed. 
+ self.check_route_entries(["200.100.1.1/32"], absent=True) + self.check_route_entries(["200.200.1.1/32"], absent=True) + + # Remove tunnel route 1 + delete_vnet_routes(dvs, "200.100.1.1/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["200.100.1.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "200.100.1.1/32") + check_remove_routes_advertisement(dvs, "200.100.1.1/32") + + # Remove tunnel route 2 + delete_vnet_routes(dvs, "200.200.1.1/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["200.200.1.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "200.200.1.1/32") + check_remove_routes_advertisement(dvs, "200.200.1.1/32") + + # Check the previous nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg1_1 not in vnet_obj.nhgs + + # Confirm the BFD sessions are removed + check_del_bfd_session(dvs, ['9.1.0.1', '9.1.0.2', '9.1.0.3']) + + delete_vnet_entry(dvs, vnet_name) + vnet_obj.check_del_vnet_entry(dvs, vnet_name) + delete_vxlan_tunnel(dvs, tunnel_name) + + + ''' + Test 3 - Test for vnet tunnel routes (custom monitoring) interaction with regular route. + Add the conflicting route and then add the vnet route with same nexthops. + Bring up the bfd sessions and check the vnet route is programmed in hardware. + Remove the vnet route and check the vnet route is removed. + Remove the conflicting route and check the conflicting route is removed. 
+ ''' + def test_vnet_orch_3(self, dvs, testlog): + vnet_obj = self.get_vnet_obj() + + tunnel_name = 'tunnel_3' + vnet_name = 'Vnet3' + self.setup_db(dvs) + vnet_obj.fetch_exist_entries(dvs) + # create l3 interface and bring it up + self.create_l3_intf("Ethernet0", "") + self.add_ip_address("Ethernet0", "20.20.20.1/24") + self.set_admin_status("Ethernet0", "down") + time.sleep(1) + self.set_admin_status("Ethernet0", "up") + + # set ip address and default route + dvs.servers[0].runcmd("ip address add 20.20.20.7/24 dev eth0") + dvs.servers[0].runcmd("ip route add default via 20.20.20.1") + + # create vxlan tunnel and verfiy it + create_vxlan_tunnel(dvs, tunnel_name, '19.19.19.19') + create_vnet_entry(dvs, vnet_name, tunnel_name, '1003', "", '', advertise_prefix=True, overlay_dmac="22:33:33:44:44:66") + vnet_obj.check_vnet_entry(dvs, vnet_name) + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '1003') + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '19.19.19.19') + + vnet_obj.fetch_exist_entries(dvs) + + # add conflicting route + dvs.runcmd("vtysh -c \"configure terminal\" -c \"ip route 105.100.1.1/32 20.20.20.7\"") + + # check ASIC route database + self.check_route_entries(["105.100.1.1/32"]) + + create_vnet_routes(dvs, "105.100.1.1/32", vnet_name, '9.7.0.1,9.7.0.2,9.7.0.3,9.7.0.4', ep_monitor='9.1.2.1,9.1.2.2,9.1.2.3,9.1.2.4',profile = "test_prf", primary='9.7.0.1,9.7.0.2', monitoring='custom',adv_prefix='105.100.1.1/32') + # Route should be properly configured when all monitor session states go up + update_monitor_session_state(dvs, "105.100.1.1/32", '9.1.2.2', 'up') + update_monitor_session_state(dvs, "105.100.1.1/32", '9.1.2.3', 'up') + update_monitor_session_state(dvs, "105.100.1.1/32", '9.1.2.1', 'up') + time.sleep(1) + route1= vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.7.0.2,9.7.0.1'], tunnel_name) + check_state_db_routes(dvs, vnet_name, "105.100.1.1/32", ['9.7.0.1', '9.7.0.2']) + + # Remove all endpoint from group route 
shouldnt come back up. + update_monitor_session_state(dvs, "105.100.1.1/32", '9.1.2.2', 'down') + update_monitor_session_state(dvs, "105.100.1.1/32", '9.1.2.1', 'down') + update_monitor_session_state(dvs, "105.100.1.1/32", '9.1.2.3', 'down') + time.sleep(1) + # after removal of vnet route, conflicting route is not getting programmed as its not a bgp learnt route. + self.check_route_entries(["105.100.1.1/32"], absent=True) + # Remove tunnel route 1 + delete_vnet_routes(dvs, "105.100.1.1/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["105.100.1.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "105.100.1.1/32") + check_remove_routes_advertisement(dvs, "105.100.1.1/32") + + # Check the previous nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + + vnet_obj.nhg_ids = {} + vnet_obj.fetch_exist_entries(dvs) + # readd the same route. + create_vnet_routes(dvs, "105.100.1.1/32", vnet_name, '9.7.0.1,9.7.0.2,9.7.0.3,9.7.0.4', ep_monitor='9.1.2.1,9.1.2.2,9.1.2.3,9.1.2.4',primary='9.7.0.1,9.7.0.2', monitoring='custom') + + # default bfd status is down, route should not be programmed in this status + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["105.100.1.1/32"], absent=True) + check_state_db_routes(dvs, vnet_name, "105.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "105.100.1.1/32") + + # Route should be properly configured when all bfd session states go up + update_monitor_session_state(dvs, "105.100.1.1/32", '9.1.2.2', 'up') + time.sleep(1) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.7.0.2'], tunnel_name) + + update_monitor_session_state(dvs, "105.100.1.1/32", '9.1.2.1', 'up') + time.sleep(1) + vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.7.0.2,9.7.0.1'], tunnel_name) + + # Remove all endpoint from group route shouldnt come back up. 
+ update_monitor_session_state(dvs, "105.100.1.1/32", '9.1.2.2', 'down') + update_monitor_session_state(dvs, "105.100.1.1/32", '9.1.2.1', 'down') + + time.sleep(1) + # after removal of the vnet route, the conflicting route is not getting programmed as it's not a bgp learnt route. + self.check_route_entries(["105.100.1.1/32"], absent=True) + # Remove tunnel route 1 + delete_vnet_routes(dvs, "105.100.1.1/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["105.100.1.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "105.100.1.1/32") + check_remove_routes_advertisement(dvs, "105.100.1.1/32") + + # Check the previous nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + + dvs.runcmd("vtysh -c \"configure terminal\" -c \"no ip route 105.100.1.1/32\"") + delete_vnet_entry(dvs, vnet_name) + vnet_obj.check_del_vnet_entry(dvs, vnet_name) + delete_vxlan_tunnel(dvs, tunnel_name) + +# Add dummy always-pass test at end as a workaround +# for an issue where a flaky failure on the final test invokes module tear-down before retrying +def test_nonflaky_dummy(): + pass