From 5dfe9aa6fb0aff659eab7913d9c60ccc880a3e13 Mon Sep 17 00:00:00 2001
From: Yury Kulazhenkov
Date: Tue, 8 Oct 2024 15:35:44 +0300
Subject: [PATCH 1/3] Add support for single IP ranges to ipam-node

---
 pkg/ip/cidr.go                            |  12 +++
 pkg/ip/cidr_test.go                       |  66 +++++++++++++
 pkg/ipam-node/allocator/allocator_test.go |  40 ++++++++
 pkg/ipam-node/allocator/range.go          |  10 +-
 pkg/ipam-node/allocator/range_test.go     | 110 +++++++++++++++++++---
 5 files changed, 219 insertions(+), 19 deletions(-)

diff --git a/pkg/ip/cidr.go b/pkg/ip/cidr.go
index 1da054c..d93e46a 100644
--- a/pkg/ip/cidr.go
+++ b/pkg/ip/cidr.go
@@ -138,6 +138,9 @@ func IsBroadcast(ip net.IP, network *net.IPNet) bool {
 	if network.IP.To4() == nil {
 		return false
 	}
+	if IsPointToPointSubnet(network) || IsSingleIPSubnet(network) {
+		return false
+	}
 	if !network.Contains(ip) {
 		return false
 	}
@@ -153,8 +156,17 @@ func IsPointToPointSubnet(network *net.IPNet) bool {
 	return ones == maskLen-1
 }
 
+// IsSingleIPSubnet returns true if the network is a single IP subnet (/32 or /128)
+func IsSingleIPSubnet(network *net.IPNet) bool {
+	ones, maskLen := network.Mask.Size()
+	return ones == maskLen
+}
+
 // LastIP returns the last IP of a subnet, excluding the broadcast if IPv4 (if not /31 net)
 func LastIP(network *net.IPNet) net.IP {
+	if IsSingleIPSubnet(network) {
+		return network.IP
+	}
 	var end net.IP
 	for i := 0; i < len(network.IP); i++ {
 		end = append(end, network.IP[i]|^network.Mask[i])
diff --git a/pkg/ip/cidr_test.go b/pkg/ip/cidr_test.go
index f45f65d..fcdc714 100644
--- a/pkg/ip/cidr_test.go
+++ b/pkg/ip/cidr_test.go
@@ -311,6 +311,22 @@ var _ = Describe("CIDR functions", func() {
 				testNet,
 				true,
 			},
+			{
+				net.ParseIP("192.168.0.10"),
+				func() *net.IPNet {
+					_, testNet, _ := net.ParseCIDR("192.168.0.10/32")
+					return testNet
+				}(),
+				false,
+			},
+			{
+				net.ParseIP("192.168.0.1"),
+				func() *net.IPNet {
+					_, testNet, _ := net.ParseCIDR("192.168.0.0/31")
+					return testNet
+				}(),
+				false,
+			},
 		}
 
 		for _, test := range testCases {
@@ -373,6 +389,30 @@ var _ = Describe("CIDR functions", func() {
 			Expect(gen().String()).To(Equal("::2/127"))
 			Expect(gen().String()).To(Equal("::4/127"))
 		})
+		It("valid - single IP IPv4 subnet", func() {
+			_, net, _ := net.ParseCIDR("192.168.0.0/16")
+			gen := GetSubnetGen(net, 32)
+			Expect(gen).NotTo(BeNil())
+			Expect(gen().String()).To(Equal("192.168.0.0/32"))
+			Expect(gen().String()).To(Equal("192.168.0.1/32"))
+			Expect(gen().String()).To(Equal("192.168.0.2/32"))
+		})
+		It("valid - single IP IPv6 subnet", func() {
+			_, net, _ := net.ParseCIDR("2002:0:0:1234::/64")
+			gen := GetSubnetGen(net, 128)
+			Expect(gen).NotTo(BeNil())
+			Expect(gen().String()).To(Equal("2002:0:0:1234::/128"))
+			Expect(gen().String()).To(Equal("2002:0:0:1234::1/128"))
+			Expect(gen().String()).To(Equal("2002:0:0:1234::2/128"))
+		})
+		It("valid - single IP IPv4 subnet, point to point network", func() {
+			_, net, _ := net.ParseCIDR("192.168.0.0/31")
+			gen := GetSubnetGen(net, 32)
+			Expect(gen).NotTo(BeNil())
+			Expect(gen().String()).To(Equal("192.168.0.0/32"))
+			Expect(gen().String()).To(Equal("192.168.0.1/32"))
+			Expect(gen()).To(BeNil())
+		})
 	})
 	Context("IsPointToPointSubnet", func() {
 		It("/31", func() {
 			_, network, _ := net.ParseCIDR("192.168.1.0/31")
 			Expect(IsPointToPointSubnet(network)).To(BeTrue())
 		})
 		It("/127", func() {
 			_, network, _ := net.ParseCIDR("2002:0:0:1234::1/127")
 			Expect(IsPointToPointSubnet(network)).To(BeTrue())
 		})
 		It("/24", func() {
 			_, network, _ := net.ParseCIDR("192.168.1.0/24")
 			Expect(IsPointToPointSubnet(network)).To(BeFalse())
 		})
 	})
+	Context("IsSingleIPSubnet", func() {
+		It("/32", func() {
+			_, network, _ := net.ParseCIDR("192.168.1.0/32")
+			Expect(IsSingleIPSubnet(network)).To(BeTrue())
+		})
+		It("/128", func() {
+			_, network, _ := net.ParseCIDR("2002:0:0:1234::1/128")
+			Expect(IsSingleIPSubnet(network)).To(BeTrue())
+		})
+		It("/24", func() {
+			_, network, _ := net.ParseCIDR("192.168.1.0/24")
+			Expect(IsSingleIPSubnet(network)).To(BeFalse())
+		})
+		It("/31", func() {
+			_, network, _ := net.ParseCIDR("192.168.1.0/31")
+			Expect(IsSingleIPSubnet(network)).To(BeFalse())
+		})
+	})
 	Context("LastIP", func() {
 		It("/31", func() {
 			_, network, _ := net.ParseCIDR("192.168.1.0/31")
 			Expect(LastIP(network).String()).To(Equal("192.168.1.1"))
 		})
 		It("/127", func() {
 			_, network, _ := net.ParseCIDR("2002:0:0:1234::0/127")
 			Expect(LastIP(network).String()).To(Equal("2002:0:0:1234::1"))
 		})
+		It("/32", func() {
+			_, network, _ := net.ParseCIDR("192.168.1.10/32")
+			Expect(LastIP(network).String()).To(Equal("192.168.1.10"))
+		})
+		It("/128", func() {
+			_, network, _ := net.ParseCIDR("2002:0:0:1234::10/128")
+			Expect(LastIP(network).String()).To(Equal("2002:0:0:1234::10"))
+		})
 		It("/24", func() {
 			_, network, _ := net.ParseCIDR("192.168.1.0/24")
 			Expect(LastIP(network).String()).To(Equal("192.168.1.254"))
diff --git a/pkg/ipam-node/allocator/allocator_test.go b/pkg/ipam-node/allocator/allocator_test.go
index d02a52a..64984a5 100644
--- a/pkg/ipam-node/allocator/allocator_test.go
+++ b/pkg/ipam-node/allocator/allocator_test.go
@@ -416,6 +416,46 @@ var _ = Describe("allocator", func() {
 			checkAlloc(a, "1", net.IP{192, 168, 1, 1})
 		})
 	})
+	Context("single ip ranges", func() {
+		It("/32 network", func() {
+			session, err := storePkg.New(
+				filepath.Join(GinkgoT().TempDir(), "test_store")).Open(context.Background())
+			Expect(err).NotTo(HaveOccurred())
+			defer func() {
+				_ = session.Commit()
+			}()
+			p := allocator.RangeSet{
+				allocator.Range{Subnet: mustSubnet("192.168.1.10/32")},
+			}
+			Expect(p.Canonicalize()).NotTo(HaveOccurred())
+			a := allocator.NewIPAllocator(&p, nil, testPoolName, session)
+			// get range iterator and do the first Next
+			checkAlloc(a, "0", net.IP{192, 168, 1, 10})
+			_, err = a.Allocate("1", testIFName, types.ReservationMetadata{}, nil)
+			Expect(err).To(MatchError(ContainSubstring("no free addresses in the allocated range")))
+		})
+		It("/24 network", func() {
+			session, err := storePkg.New(
+				filepath.Join(GinkgoT().TempDir(), "test_store")).Open(context.Background())
+			Expect(err).NotTo(HaveOccurred())
+			defer func() {
+				_ = session.Commit()
+			}()
+			p := allocator.RangeSet{
+				allocator.Range{
+					Subnet:     mustSubnet("192.168.1.0/24"),
+					RangeStart: net.ParseIP("192.168.1.100"),
+					RangeEnd:   net.ParseIP("192.168.1.100"),
+				},
+			}
+			Expect(p.Canonicalize()).NotTo(HaveOccurred())
+			a := allocator.NewIPAllocator(&p, nil, testPoolName, session)
+			// get range iterator and do the first Next
+			checkAlloc(a, "0", net.IP{192, 168, 1, 100})
+			_, err = a.Allocate("1", testIFName, types.ReservationMetadata{}, nil)
+			Expect(err).To(MatchError(ContainSubstring("no free addresses in the allocated range")))
+		})
+	})
 	Context("IP address exclusion", func() {
 		It("should exclude IPs", func() {
 			session, err := storePkg.New(
diff --git a/pkg/ipam-node/allocator/range.go b/pkg/ipam-node/allocator/range.go
index a547a6c..9f17ae5 100644
--- a/pkg/ipam-node/allocator/range.go
+++ b/pkg/ipam-node/allocator/range.go
@@ -38,12 +38,6 @@ func (r *Range) Canonicalize() error {
 		return err
 	}
 
-	// Can't create an allocator for /32 or /128 networks (single IP)
-	ones, masklen := r.Subnet.Mask.Size()
-	if ones > masklen-1 {
-		return fmt.Errorf("network %s too small to allocate from", (*net.IPNet)(&r.Subnet).String())
-	}
-
 	if len(r.Subnet.IP) != len(r.Subnet.Mask) {
 		return fmt.Errorf("IPNet IP and Mask version mismatch")
 	}
@@ -52,7 +46,7 @@ func (r *Range) Canonicalize() error {
 	networkIP := r.Subnet.IP.Mask(r.Subnet.Mask)
 	if !r.Subnet.IP.Equal(networkIP) {
 		return fmt.Errorf("network has host bits set. "+
-			"For a subnet mask of length %d the network address is %s", ones, networkIP.String())
+			"Expected subnet address is %s", networkIP.String())
 	}
 
 	// validate Gateway only if set
@@ -74,7 +68,7 @@ func (r *Range) Canonicalize() error {
 			return fmt.Errorf("RangeStart %s not in network %s", r.RangeStart.String(), (*net.IPNet)(&r.Subnet).String())
 		}
 	} else {
-		if ip.IsPointToPointSubnet((*net.IPNet)(&r.Subnet)) {
+		if ip.IsPointToPointSubnet((*net.IPNet)(&r.Subnet)) || ip.IsSingleIPSubnet((*net.IPNet)(&r.Subnet)) {
 			r.RangeStart = r.Subnet.IP
 		} else {
 			r.RangeStart = ip.NextIP(r.Subnet.IP)
diff --git a/pkg/ipam-node/allocator/range_test.go b/pkg/ipam-node/allocator/range_test.go
index 335e3a2..88cf79b 100644
--- a/pkg/ipam-node/allocator/range_test.go
+++ b/pkg/ipam-node/allocator/range_test.go
@@ -65,37 +65,46 @@ var _ = Describe("IP ranges", func() {
 				RangeEnd:   net.IP{192, 0, 2, 1},
 			}))
 		})
+		It("should generate sane defaults for a /32 ipv4 subnet", func() {
+			subnetStr := "192.0.2.10/32"
+			r := allocator.Range{Subnet: mustSubnet(subnetStr)}
+
+			err := r.Canonicalize()
+			Expect(err).NotTo(HaveOccurred())
+
+			Expect(r).To(Equal(allocator.Range{
+				Subnet:     networkSubnet(subnetStr),
+				RangeStart: net.IP{192, 0, 2, 10},
+				RangeEnd:   net.IP{192, 0, 2, 10},
+			}))
+		})
 		It("should reject ipv4 subnet using a masked address", func() {
 			subnetStr := "192.0.2.12/24"
 			r := allocator.Range{Subnet: mustSubnet(subnetStr)}
 
 			err := r.Canonicalize()
-			Expect(err).Should(MatchError("network has host bits set. " +
-				"For a subnet mask of length 24 the network address is 192.0.2.0"))
+			Expect(err).Should(MatchError("network has host bits set. Expected subnet address is 192.0.2.0"))
 		})
 		It("should reject ipv6 subnet using a masked address", func() {
 			subnetStr := "2001:DB8:1::24:19ff:fee1:c44a/64"
 			r := allocator.Range{Subnet: mustSubnet(subnetStr)}
 
 			err := r.Canonicalize()
-			Expect(err).Should(MatchError("network has host bits set. " +
-				"For a subnet mask of length 64 the network address is 2001:db8:1::"))
+			Expect(err).Should(MatchError("network has host bits set. Expected subnet address is 2001:db8:1::"))
 		})
 		It("should reject ipv6 prefix with host bit set", func() {
 			subnetStr := "2001:DB8:24:19ff::/63"
 			r := allocator.Range{Subnet: mustSubnet(subnetStr)}
 
 			err := r.Canonicalize()
-			Expect(err).Should(MatchError("network has host bits set. " +
-				"For a subnet mask of length 63 the network address is 2001:db8:24:19fe::"))
+			Expect(err).Should(MatchError("network has host bits set. Expected subnet address is 2001:db8:24:19fe::"))
 		})
 		It("should reject ipv4 network with host bit set", func() {
 			subnetStr := "192.168.127.0/23"
 			r := allocator.Range{Subnet: mustSubnet(subnetStr)}
 
 			err := r.Canonicalize()
-			Expect(err).Should(MatchError("network has host bits set." +
-				" For a subnet mask of length 23 the network address is 192.168.126.0"))
+			Expect(err).Should(MatchError("network has host bits set. Expected subnet address is 192.168.126.0"))
 		})
 		It("should generate sane defaults for ipv6 with a clean prefix", func() {
 			subnetStr := "2001:DB8:1::/64"
@@ -125,10 +134,18 @@ var _ = Describe("IP ranges", func() {
 			}))
 		})
 
-	It("Should reject a network that's too small", func() {
-		r := allocator.Range{Subnet: mustSubnet("192.0.2.0/32")}
+	It("should generate sane defaults for /128 ipv6 prefix", func() {
+		subnetStr := "2001:DB8:1::10/128"
+		r := allocator.Range{Subnet: mustSubnet(subnetStr)}
+
 		err := r.Canonicalize()
-		Expect(err).Should(MatchError("network 192.0.2.0/32 too small to allocate from"))
+		Expect(err).NotTo(HaveOccurred())
+
+		Expect(r).To(Equal(allocator.Range{
+			Subnet:     networkSubnet(subnetStr),
+			RangeStart: net.ParseIP("2001:DB8:1::10"),
+			RangeEnd:   net.ParseIP("2001:DB8:1::10"),
+		}))
 	})
 
 	It("should reject invalid RangeStart and RangeEnd specifications", func() {
@@ -169,6 +186,25 @@ var _ = Describe("IP ranges", func() {
 		Expect(err).Should(MatchError("RangeStart 192.0.2.3 not in network 192.0.2.2/31"))
 	})
 
+	It("should reject invalid RangeStart and RangeEnd for single IP networks", func() {
+		subnetStr := "192.0.2.2/32"
+		r := allocator.Range{Subnet: mustSubnet(subnetStr), RangeStart: net.ParseIP("192.0.2.1")}
+		err := r.Canonicalize()
+		Expect(err).Should(MatchError("RangeStart 192.0.2.1 not in network 192.0.2.2/32"))
+
+		r = allocator.Range{Subnet: mustSubnet(subnetStr), RangeEnd: net.ParseIP("192.0.2.3")}
+		err = r.Canonicalize()
+		Expect(err).Should(MatchError("RangeEnd 192.0.2.3 not in network 192.0.2.2/32"))
+
+		r = allocator.Range{
+			Subnet:     networkSubnet(subnetStr),
+			RangeStart: net.ParseIP("192.0.2.3"),
+			RangeEnd:   net.ParseIP("192.0.2.2"),
+		}
+		err = r.Canonicalize()
+		Expect(err).Should(MatchError("RangeStart 192.0.2.3 not in network 192.0.2.2/32"))
+	})
+
 	It("should parse all fields correctly", func() {
 		subnetStr := "192.0.2.0/24"
 		r := allocator.Range{
@@ -222,6 +258,58 @@ var _ = Describe("IP ranges", func() {
 		}))
 	})
 
+	It("Should parse /32 and /128 correctly", func() {
+		subnetStr := "192.0.2.2/32"
+		r := allocator.Range{
+			Subnet:     mustSubnet(subnetStr),
+			RangeStart: net.ParseIP("192.0.2.2"),
+			RangeEnd:   net.ParseIP("192.0.2.2"),
+			Gateway:    net.ParseIP("192.0.2.2"),
+		}
+		Expect(r.Canonicalize()).NotTo(HaveOccurred())
+
+		Expect(r).To(Equal(allocator.Range{
+			Subnet:     networkSubnet(subnetStr),
+			RangeStart: net.IP{192, 0, 2, 2},
+			RangeEnd:   net.IP{192, 0, 2, 2},
+			Gateway:    net.IP{192, 0, 2, 2},
+		}))
+
+		subnetV6Str := "2001:DB8:1::4/128"
+		r = allocator.Range{
+			Subnet:     mustSubnet(subnetV6Str),
+			RangeStart: net.ParseIP("2001:DB8:1::4"),
+			RangeEnd:   net.ParseIP("2001:DB8:1::4"),
+			Gateway:    net.ParseIP("2001:DB8:1::4"),
+		}
+		Expect(r.Canonicalize()).NotTo(HaveOccurred())
+
+		Expect(r).To(Equal(allocator.Range{
+			Subnet:     mustSubnet(subnetV6Str),
+			RangeStart: net.ParseIP("2001:DB8:1::4"),
+			RangeEnd:   net.ParseIP("2001:DB8:1::4"),
+			Gateway:    net.ParseIP("2001:DB8:1::4"),
+		}))
+	})
+
+	It("Should handle single IP range in a large subnet correctly", func() {
+		subnetStr := "192.0.2.0/24"
+		r := allocator.Range{
+			Subnet:     mustSubnet(subnetStr),
+			RangeStart: net.ParseIP("192.0.2.10"),
+			RangeEnd:   net.ParseIP("192.0.2.10"),
+			Gateway:    net.ParseIP("192.0.2.1"),
+		}
+		Expect(r.Canonicalize()).NotTo(HaveOccurred())
+
+		Expect(r).To(Equal(allocator.Range{
+			Subnet:     networkSubnet(subnetStr),
+			RangeStart: net.IP{192, 0, 2, 10},
+			RangeEnd:   net.IP{192, 0, 2, 10},
+			Gateway:    net.IP{192, 0, 2, 1},
+		}))
+	})
+
 	It("should accept v4 IPs in range and reject IPs out of range", func() {
 		r := allocator.Range{
 			Subnet: mustSubnet("192.0.2.0/24"),

From f58886d16a4a0033a50bfe78cb290f468b16d91f Mon Sep 17 00:00:00 2001
From: Yury Kulazhenkov
Date: Tue, 8 Oct 2024 20:17:27 +0300
Subject: [PATCH 2/3] Add support for small allocations for IPPool CR

- Add support for /31(/127) and /32(/128) subnets
- Add single IP allocations
---
 api/v1alpha1/ippool_validate.go            |   6 +-
 pkg/ipam-controller/allocator/allocator.go |  55 ++++--
 .../allocator/allocator_test.go            | 162 ++++++++++++++++--
 pkg/ipam-controller/config/config_test.go  |   2 +-
 4 files changed, 194 insertions(+), 31 deletions(-)

diff --git a/api/v1alpha1/ippool_validate.go b/api/v1alpha1/ippool_validate.go
index 8d27866..c3c9440 100644
--- a/api/v1alpha1/ippool_validate.go
+++ b/api/v1alpha1/ippool_validate.go
@@ -35,13 +35,13 @@ func (r *IPPool) Validate() field.ErrorList {
 			field.NewPath("spec", "subnet"), r.Spec.Subnet, "is invalid subnet"))
 	}
 
-	if r.Spec.PerNodeBlockSize < 2 {
+	if r.Spec.PerNodeBlockSize < 1 {
 		errList = append(errList, field.Invalid(
 			field.NewPath("spec", "perNodeBlockSize"),
-			r.Spec.PerNodeBlockSize, "must be at least 2"))
+			r.Spec.PerNodeBlockSize, "must be at least 1"))
 	}
 
-	if network != nil && r.Spec.PerNodeBlockSize >= 2 {
+	if network != nil && r.Spec.PerNodeBlockSize >= 1 {
 		if GetPossibleIPCount(network).Cmp(big.NewInt(int64(r.Spec.PerNodeBlockSize))) < 0 {
 			// config is not valid even if only one node exist in the cluster
 			errList = append(errList, field.Invalid(
diff --git a/pkg/ipam-controller/allocator/allocator.go b/pkg/ipam-controller/allocator/allocator.go
index 1a8c66a..7327331 100644
--- a/pkg/ipam-controller/allocator/allocator.go
+++ b/pkg/ipam-controller/allocator/allocator.go
@@ -71,12 +71,16 @@ func (pa *PoolAllocator) AllocateFromPool(ctx context.Context, node string) (*Al
 		return &existingAlloc, nil
 	}
 	allocations := pa.getAllocationsAsSlice()
+	// determine the first possible range for the subnet
 	var startIP net.IP
-	if len(allocations) == 0 || ip.Distance(pa.cfg.Subnet.IP, allocations[0].StartIP) > 2 {
-		// start allocations from the network address if there are no allocations or if the "hole" exist before
-		// the firs allocation
-		startIP = ip.NextIP(pa.cfg.Subnet.IP)
+	if pa.canUseNetworkAddress() {
+		startIP = pa.cfg.Subnet.IP
 	} else {
+		startIP = ip.NextIP(pa.cfg.Subnet.IP)
+	}
+	// check if the first possible range is already allocated; if so, search for "holes" or use the next subnet
+	if len(allocations) != 0 && allocations[0].StartIP.Equal(startIP) {
+		startIP = nil
 		for i := 0; i < len(allocations); i++ {
 			nextI := i + 1
 			// if last allocation in the list
@@ -122,6 +126,12 @@ func (pa *PoolAllocator) Deallocate(ctx context.Context, node string) {
 	}
 }
 
+// canUseNetworkAddress returns true if it is allowed to use the network address in the node range
+// it is allowed to use the network address if the subnet is a point to point or a single IP subnet
+func (pa *PoolAllocator) canUseNetworkAddress() bool {
+	return ip.IsPointToPointSubnet(pa.cfg.Subnet) || ip.IsSingleIPSubnet(pa.cfg.Subnet)
+}
+
 // load loads range to the pool allocator with validation for conflicts
 func (pa *PoolAllocator) load(ctx context.Context, nodeName string, allocRange AllocatedRange) error {
 	log := pa.getLog(ctx, pa.cfg).WithValues("node", nodeName)
@@ -147,29 +157,44 @@ func (pa *PoolAllocator) checkAllocation(allocRange AllocatedRange) error {
 	if !pa.cfg.Subnet.Contains(allocRange.StartIP) || !pa.cfg.Subnet.Contains(allocRange.EndIP) {
 		return fmt.Errorf("invalid allocation allocators: start or end IP is out of the subnet")
 	}
-
-	if ip.Cmp(allocRange.EndIP, allocRange.StartIP) <= 0 {
-		return fmt.Errorf("invalid allocation allocators: start IP must be less then end IP")
+	if ip.Cmp(allocRange.EndIP, allocRange.StartIP) < 0 {
+		return fmt.Errorf("invalid allocation allocators: start IP must be less than or equal to end IP")
 	}
-
-	// check that StartIP of the range has valid offset.
-	// all ranges have same size, so we can simply check that (StartIP offset - 1) % pa.cfg.PerNodeBlockSize == 0
-	// -1 required because we skip network addressee (e.g. in 192.168.0.0/24, first allocation will be 192.168.0.1)
 	distanceFromNetworkStart := ip.Distance(pa.cfg.Subnet.IP, allocRange.StartIP)
-	if distanceFromNetworkStart < 1 ||
-		math.Mod(float64(distanceFromNetworkStart)-1, float64(pa.cfg.PerNodeBlockSize)) != 0 {
-		return fmt.Errorf("invalid start IP offset")
+	// check that StartIP of the range has valid offset.
+	// all ranges have same size, so we can simply check that (StartIP offset) % pa.cfg.PerNodeBlockSize == 0
+	if pa.canUseNetworkAddress() {
+		if math.Mod(float64(distanceFromNetworkStart), float64(pa.cfg.PerNodeBlockSize)) != 0 {
+			return fmt.Errorf("invalid start IP offset")
+		}
+	} else {
+		if distanceFromNetworkStart < 1 ||
+			// -1 required because we skip network address (e.g. in 192.168.0.0/24, first allocation will be 192.168.0.1)
+			math.Mod(float64(distanceFromNetworkStart)-1, float64(pa.cfg.PerNodeBlockSize)) != 0 {
+			return fmt.Errorf("invalid start IP offset")
+		}
 	}
 	if ip.Distance(allocRange.StartIP, allocRange.EndIP) != int64(pa.cfg.PerNodeBlockSize)-1 {
 		return fmt.Errorf("ip count mismatch")
 	}
+	// for single IP ranges we need to discard the allocation if it matches the gateway
+	if pa.cfg.PerNodeBlockSize == 1 && pa.cfg.Gateway != nil && allocRange.StartIP.Equal(pa.cfg.Gateway) {
+		return fmt.Errorf("gw can't be allocated when perNodeBlockSize is 1")
+	}
 	return nil
 }
 
 // return slice with allocated ranges.
 // ranges are not overlap and are sorted, but there can be "holes" between ranges
 func (pa *PoolAllocator) getAllocationsAsSlice() []AllocatedRange {
-	allocatedRanges := make([]AllocatedRange, 0, len(pa.allocations))
+	allocatedRanges := make([]AllocatedRange, 0, len(pa.allocations)+1)
+
+	if pa.cfg.PerNodeBlockSize == 1 && pa.cfg.Gateway != nil {
+		// when perNodeBlockSize is 1 we should not allocate the gateway,
+		// so add a "virtual" allocation for the gateway if we detect that only 1 IP is requested per node;
+		// this allocation should never be exposed to the CR's status
+		allocatedRanges = append(allocatedRanges, AllocatedRange{StartIP: pa.cfg.Gateway, EndIP: pa.cfg.Gateway})
+	}
 	for _, a := range pa.allocations {
 		allocatedRanges = append(allocatedRanges, a)
 	}
diff --git a/pkg/ipam-controller/allocator/allocator_test.go b/pkg/ipam-controller/allocator/allocator_test.go
index 4a6264d..9091b21 100644
--- a/pkg/ipam-controller/allocator/allocator_test.go
+++ b/pkg/ipam-controller/allocator/allocator_test.go
@@ -35,7 +35,7 @@ const (
 	testPoolName1          = "pool1"
 	testPoolName2          = "pool2"
 	testPerNodeBlockCount1 = 15
-	testPerNodeBlockCount2 = 10
+	testPerNodeBlockCount2 = 1
 )
 
 func getPool1() *ipamv1alpha1.IPPool {
@@ -54,7 +54,7 @@ func getPool2() *ipamv1alpha1.IPPool {
 		Spec: ipamv1alpha1.IPPoolSpec{
 			Subnet:           "172.16.0.0/16",
 			PerNodeBlockSize: testPerNodeBlockCount2,
-			Gateway:          "172.16.0.1"},
+			Gateway:          "172.16.0.3"},
 	}
 }
 
@@ -79,7 +79,7 @@ var _ = Describe("Allocator", func() {
 		Expect(node1AllocPool1.StartIP.String()).To(BeEquivalentTo("192.168.0.1"))
 		Expect(node1AllocPool1.EndIP.String()).To(BeEquivalentTo("192.168.0.15"))
 		Expect(node1AllocPool2.StartIP.String()).To(BeEquivalentTo("172.16.0.1"))
-		Expect(node1AllocPool2.EndIP.String()).To(BeEquivalentTo("172.16.0.10"))
+		Expect(node1AllocPool2.EndIP.String()).To(BeEquivalentTo("172.16.0.1"))
 
 		node1AllocSecondCall, err := pa1.AllocateFromPool(ctx, testNodeName1)
 		Expect(err).NotTo(HaveOccurred())
@@ -95,8 +95,8 @@ var _ = Describe("Allocator", func() {
 		Expect(err).NotTo(HaveOccurred())
 		Expect(node2AllocPool1.StartIP.String()).To(BeEquivalentTo("192.168.0.16"))
 		Expect(node2AllocPool1.EndIP.String()).To(BeEquivalentTo("192.168.0.30"))
-		Expect(node2AllocPool2.StartIP.String()).To(BeEquivalentTo("172.16.0.11"))
-		Expect(node2AllocPool2.EndIP.String()).To(BeEquivalentTo("172.16.0.20"))
+		Expect(node2AllocPool2.StartIP.String()).To(BeEquivalentTo("172.16.0.2"))
+		Expect(node2AllocPool2.EndIP.String()).To(BeEquivalentTo("172.16.0.2"))
 
 		node3AllocPool1, err := pa1.AllocateFromPool(ctx, testNodeName3)
 		Expect(err).NotTo(HaveOccurred())
@@ -104,8 +104,8 @@ var _ = Describe("Allocator", func() {
 		Expect(err).NotTo(HaveOccurred())
 		Expect(node3AllocPool1.StartIP.String()).To(BeEquivalentTo("192.168.0.31"))
 		Expect(node3AllocPool1.EndIP.String()).To(BeEquivalentTo("192.168.0.45"))
-		Expect(node3AllocPool2.StartIP.String()).To(BeEquivalentTo("172.16.0.21"))
-		Expect(node3AllocPool2.EndIP.String()).To(BeEquivalentTo("172.16.0.30"))
+		Expect(node3AllocPool2.StartIP.String()).To(BeEquivalentTo("172.16.0.4"))
+		Expect(node3AllocPool2.EndIP.String()).To(BeEquivalentTo("172.16.0.4"))
 
 		node4AllocPool1, err := pa1.AllocateFromPool(ctx, testNodeName4)
 		Expect(err).NotTo(HaveOccurred())
@@ -113,8 +113,8 @@ var _ = Describe("Allocator", func() {
 		Expect(err).NotTo(HaveOccurred())
 		Expect(node4AllocPool1.StartIP.String()).To(BeEquivalentTo("192.168.0.46"))
 		Expect(node4AllocPool1.EndIP.String()).To(BeEquivalentTo("192.168.0.60"))
-		Expect(node4AllocPool2.StartIP.String()).To(BeEquivalentTo("172.16.0.31"))
-		Expect(node4AllocPool2.EndIP.String()).To(BeEquivalentTo("172.16.0.40"))
+		Expect(node4AllocPool2.StartIP.String()).To(BeEquivalentTo("172.16.0.5"))
+		Expect(node4AllocPool2.EndIP.String()).To(BeEquivalentTo("172.16.0.5"))
 
 		// deallocate for node3 and node1
 		pa1.Deallocate(ctx, testNodeName1)
@@ -130,7 +130,7 @@ var _ = Describe("Allocator", func() {
 		Expect(node3AllocPool1.StartIP.String()).To(BeEquivalentTo("192.168.0.1"))
 		Expect(node3AllocPool1.EndIP.String()).To(BeEquivalentTo("192.168.0.15"))
 		Expect(node3AllocPool2.StartIP.String()).To(BeEquivalentTo("172.16.0.1"))
-		Expect(node3AllocPool2.EndIP.String()).To(BeEquivalentTo("172.16.0.10"))
+		Expect(node3AllocPool2.EndIP.String()).To(BeEquivalentTo("172.16.0.1"))
 
 		node1AllocPool1, err = pa1.AllocateFromPool(ctx, testNodeName1)
 		Expect(err).ToNot(HaveOccurred())
@@ -138,8 +138,8 @@ var _ = Describe("Allocator", func() {
 		Expect(err).ToNot(HaveOccurred())
 		Expect(node1AllocPool1.StartIP.String()).To(BeEquivalentTo("192.168.0.31"))
 		Expect(node1AllocPool1.EndIP.String()).To(BeEquivalentTo("192.168.0.45"))
-		Expect(node1AllocPool2.StartIP.String()).To(BeEquivalentTo("172.16.0.21"))
-		Expect(node1AllocPool2.EndIP.String()).To(BeEquivalentTo("172.16.0.30"))
+		Expect(node1AllocPool2.StartIP.String()).To(BeEquivalentTo("172.16.0.4"))
+		Expect(node1AllocPool2.EndIP.String()).To(BeEquivalentTo("172.16.0.4"))
 	})
 
 	It("Deallocate from pool", func() {
@@ -236,6 +236,144 @@ var _ = Describe("Allocator", func() {
 		Expect(node2AllocPool1.EndIP.String()).To(BeEquivalentTo("192.168.0.30"))
 	})
 
+	It("Load single IP range", func() {
+		pool2 := getPool2()
+		pool2.Status = ipamv1alpha1.IPPoolStatus{
+			Allocations: []ipamv1alpha1.Allocation{
+				{
+					NodeName: testNodeName1,
+					StartIP:  "172.16.0.1",
+					EndIP:    "172.16.0.1",
+				},
+				{
+					// should discard, overlaps with GW
+					NodeName: testNodeName2,
+					StartIP:  "172.16.0.3",
+					EndIP:    "172.16.0.3",
+				},
+				{
+					NodeName: testNodeName3,
+					StartIP:  "172.16.0.4",
+					EndIP:    "172.16.0.4",
+				},
+			},
+		}
+		selectedNodes := sets.New(testNodeName1, testNodeName2)
+		a := allocator.CreatePoolAllocatorFromIPPool(ctx, pool2, selectedNodes)
+		node1AllocPool, err := a.AllocateFromPool(ctx, testNodeName1)
+		Expect(err).ToNot(HaveOccurred())
+		Expect(node1AllocPool.StartIP.String()).To(BeEquivalentTo("172.16.0.1"))
+		Expect(node1AllocPool.EndIP.String()).To(BeEquivalentTo("172.16.0.1"))
+		node2AllocPool, err := a.AllocateFromPool(ctx, testNodeName2)
+		Expect(err).ToNot(HaveOccurred())
+		// should get the new IP
+		Expect(node2AllocPool.StartIP.String()).To(BeEquivalentTo("172.16.0.2"))
+		Expect(node2AllocPool.EndIP.String()).To(BeEquivalentTo("172.16.0.2"))
+		// should get IP from the status
+		node3AllocPool, err := a.AllocateFromPool(ctx, testNodeName3)
+		Expect(err).ToNot(HaveOccurred())
+		Expect(node3AllocPool.StartIP.String()).To(BeEquivalentTo("172.16.0.4"))
+		Expect(node3AllocPool.EndIP.String()).To(BeEquivalentTo("172.16.0.4"))
+	})
+
+	Context("small pools", func() {
+		It("/32 pool - can allocate if no gw", func() {
+			pool := &ipamv1alpha1.IPPool{
+				ObjectMeta: v1.ObjectMeta{Name: "small-pool"},
+				Spec: ipamv1alpha1.IPPoolSpec{
+					Subnet:           "10.10.10.10/32",
+					PerNodeBlockSize: 1,
+				},
+			}
+			a := allocator.CreatePoolAllocatorFromIPPool(ctx, pool, sets.New(testNodeName1, testNodeName2))
+			node1Alloc, err := a.AllocateFromPool(ctx, testNodeName1)
+			Expect(err).ToNot(HaveOccurred())
+			Expect(node1Alloc.StartIP.String()).To(BeEquivalentTo("10.10.10.10"))
+			Expect(node1Alloc.EndIP.String()).To(BeEquivalentTo("10.10.10.10"))
+			_, err = a.AllocateFromPool(ctx, testNodeName2)
+			Expect(errors.Is(err, allocator.ErrNoFreeRanges)).To(BeTrue())
+		})
+		It("/32 pool - can't allocate if gw set", func() {
+			pool := &ipamv1alpha1.IPPool{
+				ObjectMeta: v1.ObjectMeta{Name: "small-pool"},
+				Spec: ipamv1alpha1.IPPoolSpec{
+					Subnet:           "10.10.10.10/32",
+					PerNodeBlockSize: 1,
+					Gateway:          "10.10.10.10",
+				},
+			}
+			a := allocator.CreatePoolAllocatorFromIPPool(ctx, pool, sets.New(testNodeName1, testNodeName2))
+			_, err := a.AllocateFromPool(ctx, testNodeName1)
+			Expect(errors.Is(err, allocator.ErrNoFreeRanges)).To(BeTrue())
+		})
+		It("/31 pool - can allocate 2 ips", func() {
+			pool := &ipamv1alpha1.IPPool{
+				ObjectMeta: v1.ObjectMeta{Name: "small-pool"},
+				Spec: ipamv1alpha1.IPPoolSpec{
+					Subnet:           "10.10.10.10/31",
+					PerNodeBlockSize: 2,
+				},
+			}
+			a := allocator.CreatePoolAllocatorFromIPPool(ctx, pool, sets.New(testNodeName1, testNodeName2))
+			node1Alloc, err := a.AllocateFromPool(ctx, testNodeName1)
+			Expect(err).ToNot(HaveOccurred())
+			Expect(node1Alloc.StartIP.String()).To(BeEquivalentTo("10.10.10.10"))
+			Expect(node1Alloc.EndIP.String()).To(BeEquivalentTo("10.10.10.11"))
+			_, err = a.AllocateFromPool(ctx, testNodeName2)
+			Expect(errors.Is(err, allocator.ErrNoFreeRanges)).To(BeTrue())
+		})
+		It("/31 pool - can allocate 1 ip for 2 nodes", func() {
+			pool := &ipamv1alpha1.IPPool{
+				ObjectMeta: v1.ObjectMeta{Name: "small-pool"},
+				Spec: ipamv1alpha1.IPPoolSpec{
+					Subnet:           "10.10.10.10/31",
+					PerNodeBlockSize: 1,
+				},
+			}
+			a := allocator.CreatePoolAllocatorFromIPPool(ctx, pool, sets.New(testNodeName1, testNodeName2))
+			node1Alloc, err := a.AllocateFromPool(ctx, testNodeName1)
+			Expect(err).ToNot(HaveOccurred())
+			Expect(node1Alloc.StartIP.String()).To(BeEquivalentTo("10.10.10.10"))
+			Expect(node1Alloc.EndIP.String()).To(BeEquivalentTo("10.10.10.10"))
+			node2Alloc, err := a.AllocateFromPool(ctx, testNodeName2)
+			Expect(err).ToNot(HaveOccurred())
+			Expect(node2Alloc.StartIP.String()).To(BeEquivalentTo("10.10.10.11"))
+			Expect(node2Alloc.EndIP.String()).To(BeEquivalentTo("10.10.10.11"))
+		})
+		It("/31 pool - load allocations", func() {
+			pool := &ipamv1alpha1.IPPool{
+				ObjectMeta: v1.ObjectMeta{Name: "small-pool"},
+				Spec: ipamv1alpha1.IPPoolSpec{
+					Subnet:           "10.10.10.10/31",
+					PerNodeBlockSize: 1,
+				},
+			}
+			pool.Status = ipamv1alpha1.IPPoolStatus{
+				Allocations: []ipamv1alpha1.Allocation{
+					{
+						NodeName: testNodeName1,
+						StartIP:  "10.10.10.10",
+						EndIP:    "10.10.10.10",
+					},
+					{
+						NodeName: testNodeName2,
+						StartIP:  "10.10.10.11",
+						EndIP:    "10.10.10.11",
+					},
+				},
+			}
+			a := allocator.CreatePoolAllocatorFromIPPool(ctx, pool, sets.New(testNodeName1, testNodeName2))
+			node2Alloc, err := a.AllocateFromPool(ctx, testNodeName2)
+			Expect(err).ToNot(HaveOccurred())
+			Expect(node2Alloc.StartIP.String()).To(BeEquivalentTo("10.10.10.11"))
+			Expect(node2Alloc.EndIP.String()).To(BeEquivalentTo("10.10.10.11"))
+			node1Alloc, err := a.AllocateFromPool(ctx, testNodeName1)
+			Expect(err).ToNot(HaveOccurred())
+			Expect(node1Alloc.StartIP.String()).To(BeEquivalentTo("10.10.10.10"))
+			Expect(node1Alloc.EndIP.String()).To(BeEquivalentTo("10.10.10.10"))
+		})
+	})
+
 	It("ConfigureAndLoadAllocations - Data load test", func() {
 		getValidData := func() *allocator.AllocatedRange {
 			return &allocator.AllocatedRange{
diff --git a/pkg/ipam-controller/config/config_test.go b/pkg/ipam-controller/config/config_test.go
index f6e4c65..990332f 100644
--- a/pkg/ipam-controller/config/config_test.go
+++ b/pkg/ipam-controller/config/config_test.go
@@ -58,7 +58,7 @@ var _ = Describe("Config", func() {
 	})
 	It("Invalid pool: perNodeBlockSize too small", func() {
 		poolConfig := getValidPool()
-		poolConfig.PerNodeBlockSize = 1
+		poolConfig.PerNodeBlockSize = 0
 		cfg := &config.Config{Pools: map[string]config.PoolConfig{"pool1": poolConfig}}
 		Expect(cfg.Validate()).To(HaveOccurred())
 	})

From 07ef8820d852db2275545ba75ea4cb046f06dcf5 Mon Sep 17 00:00:00 2001
From: Yury Kulazhenkov
Date: Tue, 8 Oct 2024 20:29:15 +0300
Subject: [PATCH 3/3] Add support for /32 and /128 allocations for CIDRPool

---
 README.md                                 |   2 +-
 api/v1alpha1/cidrpool_test.go             |   8 +-
 api/v1alpha1/cidrpool_validate.go         |  10 +-
 .../controllers/cidrpool/cidrpool.go      |   8 +-
 .../controllers/cidrpool/cidrpool_test.go | 196 ++++++++++++++++++
 5 files changed, 207 insertions(+), 17 deletions(-)

diff --git a/README.md b/README.md
index 10b7c2f..cb5fc7b 100644
--- a/README.md
+++ b/README.md
@@ -394,7 +394,7 @@ spec:
 > __Notes:__
 >
 > * pool name is composed of alphanumeric letters separated by dots(`.`) underscores(`_`) or hyphens(`-`).
-> * `perNodeBlockSize` minimum size is 2.
+> * `perNodeBlockSize` minimum size is 1.
 > * `subnet` must be large enough to accommodate at least one `perNodeBlockSize` block of IPs.
 
 
diff --git a/api/v1alpha1/cidrpool_test.go b/api/v1alpha1/cidrpool_test.go
index 7a7284f..0c6d5d0 100644
--- a/api/v1alpha1/cidrpool_test.go
+++ b/api/v1alpha1/cidrpool_test.go
@@ -185,8 +185,8 @@ var _ = Describe("CIDRPool", func() {
 		},
 		Entry("empty", "", int32(30), false),
 		Entry("invalid value", "aaaa", int32(30), false),
-		Entry("/32", "192.168.1.1/32", int32(32), false),
-		Entry("/128", "2001:db8:3333:4444::0/128", int32(128), false),
+		Entry("/32", "192.168.1.1/32", int32(32), true),
+		Entry("/128", "2001:db8:3333:4444::0/128", int32(128), true),
 		Entry("valid ipv4", "192.168.1.0/24", int32(30), true),
 		Entry("valid ipv6", "2001:db8:3333:4444::0/64", int32(120), true),
 	)
@@ -203,8 +203,8 @@ var _ = Describe("CIDRPool", func() {
 		Entry("not set", "192.168.0.0/16", int32(0), false),
 		Entry("negative", "192.168.0.0/16", int32(-10), false),
 		Entry("larger than CIDR", "192.168.0.0/16", int32(8), false),
-		Entry("smaller than 31 for IPv4 pool", "192.168.0.0/16", int32(32), false),
-		Entry("smaller than 127 for IPv6 pool", "2001:db8:3333:4444::0/64", int32(128), false),
+		Entry("32 for IPv4 pool", "192.168.0.0/16", int32(32), true),
+		Entry("128 for IPv6 pool", "2001:db8:3333:4444::0/64", int32(128), true),
 		Entry("match CIDR prefix size - ipv4", "192.168.0.0/16", int32(16), true),
 		Entry("match CIDR prefix size - ipv6", "2001:db8:3333:4444::0/64", int32(64), true),
 	)
diff --git a/api/v1alpha1/cidrpool_validate.go b/api/v1alpha1/cidrpool_validate.go
index aa4a895..87ae6f4 100644
--- a/api/v1alpha1/cidrpool_validate.go
+++ b/api/v1alpha1/cidrpool_validate.go
@@ -60,12 +60,6 @@ func (r *CIDRPool) validateCIDR() field.ErrorList {
 		return field.ErrorList{field.Invalid(field.NewPath("spec", "cidr"), r.Spec.CIDR, "network prefix has host bits set")}
 	}
 
-	setBits, bitsTotal := network.Mask.Size()
-	if setBits == bitsTotal {
-		return field.ErrorList{field.Invalid(
-			field.NewPath("spec", "cidr"), r.Spec.CIDR, "single IP prefixes are not supported")}
-	}
-
 	if r.Spec.GatewayIndex != nil && *r.Spec.GatewayIndex < 0 {
 		return field.ErrorList{field.Invalid(
 			field.NewPath("spec", "gatewayIndex"), r.Spec.GatewayIndex, "must not be negative")}
@@ -75,9 +69,9 @@ func (r *CIDRPool) validateCIDR() field.ErrorList {
 		return field.ErrorList{field.Invalid(
 			field.NewPath("spec", "perNodeNetworkPrefix"), r.Spec.PerNodeNetworkPrefix, "must not be negative")}
 	}
-
+	setBits, bitsTotal := network.Mask.Size()
 	if r.Spec.PerNodeNetworkPrefix == 0 ||
-		r.Spec.PerNodeNetworkPrefix >= int32(bitsTotal) ||
+		r.Spec.PerNodeNetworkPrefix > int32(bitsTotal) ||
 		r.Spec.PerNodeNetworkPrefix < int32(setBits) {
 		return field.ErrorList{field.Invalid(
 			field.NewPath("spec", "perNodeNetworkPrefix"),
diff --git a/pkg/ipam-node/controllers/cidrpool/cidrpool.go b/pkg/ipam-node/controllers/cidrpool/cidrpool.go
index cb5c92f..3c5cde9 100644
--- a/pkg/ipam-node/controllers/cidrpool/cidrpool.go
+++ b/pkg/ipam-node/controllers/cidrpool/cidrpool.go
@@ -72,7 +72,7 @@ func (r *CIDRPoolReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c
 		}
 		_, nodeSubnet, _ := net.ParseCIDR(alloc.Prefix)
 		startIP := ip.NextIP(nodeSubnet.IP)
-		if ip.IsPointToPointSubnet(nodeSubnet) {
+		if ip.IsPointToPointSubnet(nodeSubnet) || ip.IsSingleIPSubnet(nodeSubnet) {
 			startIP = nodeSubnet.IP
 		}
 		endIP := ip.LastIP(nodeSubnet)
@@ -80,7 +80,7 @@ func (r *CIDRPoolReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c
 		for _, r := range cidrPool.Spec.Routes {
 			routes = append(routes, pool.Route{Dst: r.Dst})
 		}
-		pool := &pool.Pool{
+		p := &pool.Pool{
 			Name:    cidrPool.Name,
 			Subnet:  alloc.Prefix,
 			Gateway: alloc.Gateway,
@@ -89,9 +89,9 @@ func (r *CIDRPoolReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c
 			Exclusions: buildExclusions(cidrPool.Spec.Exclusions, nodeSubnet, startIP, endIP),
 			Routes:     routes,
 		}
-		pool.DefaultGateway = cidrPool.Spec.DefaultGateway
+		p.DefaultGateway = cidrPool.Spec.DefaultGateway
 		reqLog.Info("CIDRPool config updated", "name", cidrPool.Name)
-		r.PoolManager.UpdatePool(poolKey, pool)
+		r.PoolManager.UpdatePool(poolKey, p)
 		found = true
 		break
 	}
diff --git a/pkg/ipam-node/controllers/cidrpool/cidrpool_test.go b/pkg/ipam-node/controllers/cidrpool/cidrpool_test.go
index 6da5667..92d9e09 100644
--- a/pkg/ipam-node/controllers/cidrpool/cidrpool_test.go
+++ b/pkg/ipam-node/controllers/cidrpool/cidrpool_test.go
@@ -14,13 +14,33 @@ package controllers
 
 import (
+	"context"
 	"net"
+	"time"
 
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes/scheme"
+	"k8s.io/client-go/rest"
+	"k8s.io/utils/ptr"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/cache"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/envtest"
+	metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
 
 	ipamv1alpha1 "github.com/Mellanox/nvidia-k8s-ipam/api/v1alpha1"
+	"github.com/Mellanox/nvidia-k8s-ipam/pkg/common"
 	"github.com/Mellanox/nvidia-k8s-ipam/pkg/pool"
+	poolPkg "github.com/Mellanox/nvidia-k8s-ipam/pkg/pool"
+)
+
+const (
+	testNamespace = "test-ns"
+	testNodeName  = "test-node"
 )
 
 var _ = Describe("CIDRPool", func() {
@@ -67,4 +87,180 @@ var _ = Describe("CIDRPool", func() {
 			},
 		),
 	)
+	Context("Controller tests", Ordered, func() {
+		var (
+			err         error
+			cfg         *rest.Config
+			k8sClient   client.Client
+			testEnv     *envtest.Environment
+			cancelFunc  context.CancelFunc
+			ctx         context.Context
+			poolManager poolPkg.Manager
+		)
+
+		BeforeAll(func() {
+			poolManager = poolPkg.NewManager()
+			By("bootstrapping test environment")
+			testEnv = &envtest.Environment{
+				CRDDirectoryPaths: []string{"../../../../deploy/crds"},
+				CRDInstallOptions: envtest.CRDInstallOptions{
+					ErrorIfPathMissing: true,
+				},
+			}
+			ctx, cancelFunc = context.WithCancel(context.Background())
+			Expect(ipamv1alpha1.AddToScheme(scheme.Scheme)).NotTo(HaveOccurred())
+
+			cfg, err = testEnv.Start()
+			Expect(err).NotTo(HaveOccurred())
+			Expect(cfg).NotTo(BeNil())
+
+			k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
+			Expect(err).NotTo(HaveOccurred())
+			Expect(k8sClient).NotTo(BeNil())
+
+			Expect(k8sClient.Create(ctx, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}})).To(BeNil())
+
+			mgr, err := ctrl.NewManager(cfg, ctrl.Options{
+				Scheme:  scheme.Scheme,
+				Metrics: metricsserver.Options{BindAddress: "0"},
+				Cache: cache.Options{
+					DefaultNamespaces: map[string]cache.Config{testNamespace: {}},
+					ByObject: map[client.Object]cache.ByObject{
+						&corev1.Pod{}: {Namespaces: map[string]cache.Config{cache.AllNamespaces: {}}}},
+				},
+			})
+			Expect(err).NotTo(HaveOccurred())
+			Expect((&CIDRPoolReconciler{
+				PoolManager: poolManager,
+				Client:      mgr.GetClient(),
+				Scheme:      mgr.GetScheme(),
+				NodeName:    testNodeName,
+			}).SetupWithManager(mgr)).NotTo(HaveOccurred())
+
+			go func() {
+				defer GinkgoRecover()
+				Expect(mgr.Start(ctx)).NotTo(HaveOccurred())
+			}()
+		})
+		AfterAll(func() {
+			cancelFunc()
+			By("tearing down the test environment")
+			err := testEnv.Stop()
+			Expect(err).NotTo(HaveOccurred())
+		})
+		AfterEach(func() {
+			cidrPoolList := &ipamv1alpha1.CIDRPoolList{}
+			Expect(k8sClient.List(ctx, cidrPoolList)).NotTo(HaveOccurred())
+			for _, p := range cidrPoolList.Items {
+				Expect(k8sClient.Delete(ctx, &p)).NotTo(HaveOccurred())
+			}
+			Eventually(func(g Gomega) {
+				g.Expect(k8sClient.List(ctx, cidrPoolList)).NotTo(HaveOccurred())
+				g.Expect(cidrPoolList.Items).To(BeEmpty())
+				g.Expect(poolManager.GetPools()).To(BeEmpty())
+			}).WithTimeout(time.Minute).WithPolling(time.Second).Should(Succeed())
+		})
+		It("Valid pool config", func() {
+			p := &ipamv1alpha1.CIDRPool{
+				ObjectMeta: metav1.ObjectMeta{Name: "test-pool", Namespace: testNamespace},
+				Spec: ipamv1alpha1.CIDRPoolSpec{
+					CIDR:                 "10.10.0.0/16",
+					GatewayIndex:         ptr.To[int32](1),
+					PerNodeNetworkPrefix: 24,
+					Exclusions: []ipamv1alpha1.ExcludeRange{
+						{StartIP: "10.10.33.10", EndIP: "10.10.33.20"},
+						{StartIP: "10.10.10.10", EndIP: "10.10.10.20"},
+					},
+					DefaultGateway: true,
+					Routes:         []ipamv1alpha1.Route{{Dst: "5.5.5.5/32"}},
+				},
+			}
+			Expect(k8sClient.Create(ctx, p)).NotTo(HaveOccurred())
+			p.Status.Allocations = []ipamv1alpha1.CIDRPoolAllocation{{
+				NodeName: testNodeName,
+				Gateway:  "10.10.10.1",
+				Prefix:   "10.10.10.0/24",
+			}}
+			Expect(k8sClient.Status().Update(ctx, p)).NotTo(HaveOccurred())
+			Eventually(func(g Gomega) {
+				g.Expect(poolManager.GetPoolByKey(
+					common.GetPoolKey("test-pool", common.PoolTypeCIDRPool))).To(
+					Equal(&pool.Pool{
+						Name:    "test-pool",
+						Subnet:  "10.10.10.0/24",
+						StartIP: "10.10.10.1",
+						EndIP:   "10.10.10.254",
+						Gateway: "10.10.10.1",
+						Exclusions: []pool.ExclusionRange{{
+							StartIP: "10.10.10.10",
+							EndIP:   "10.10.10.20",
+						}},
+						Routes:         []pool.Route{{Dst: "5.5.5.5/32"}},
+						DefaultGateway: true,
+					}))
+			}).WithTimeout(time.Second * 15).WithPolling(time.Second).Should(Succeed())
+		})
+		It("Valid pool config /32 cidr", func() {
+			p := &ipamv1alpha1.CIDRPool{
+				ObjectMeta: metav1.ObjectMeta{Name: "test-pool", Namespace: testNamespace},
+				Spec: ipamv1alpha1.CIDRPoolSpec{
+					CIDR:                 "10.10.10.0/24",
+					PerNodeNetworkPrefix: 32,
+				},
+			}
+			Expect(k8sClient.Create(ctx, p)).NotTo(HaveOccurred())
+			p.Status.Allocations = []ipamv1alpha1.CIDRPoolAllocation{{
+				NodeName: testNodeName,
+				Prefix:   "10.10.10.12/32",
+			}}
+			Expect(k8sClient.Status().Update(ctx, p)).NotTo(HaveOccurred())
+			Eventually(func(g Gomega) {
+				g.Expect(poolManager.GetPoolByKey(
+					common.GetPoolKey("test-pool", common.PoolTypeCIDRPool))).To(
+					Equal(&pool.Pool{
+						Name:       "test-pool",
+						Subnet:     "10.10.10.12/32",
+						StartIP:    "10.10.10.12",
+						EndIP:      "10.10.10.12",
+						Exclusions: []pool.ExclusionRange{},
+						Routes:     []pool.Route{},
+					}))
+			}).WithTimeout(time.Second * 15).WithPolling(time.Second).Should(Succeed())
+		})
+		It("Update Valid config with invalid", func() {
+			p := &ipamv1alpha1.CIDRPool{
+				ObjectMeta: metav1.ObjectMeta{Name: "test-pool", Namespace: testNamespace},
+				Spec: ipamv1alpha1.CIDRPoolSpec{
+					CIDR:                 "10.10.0.0/16",
+					GatewayIndex:         ptr.To[int32](1),
+					PerNodeNetworkPrefix: 24,
+				},
+			}
+			Expect(k8sClient.Create(ctx, p)).NotTo(HaveOccurred())
+			p.Status.Allocations = []ipamv1alpha1.CIDRPoolAllocation{{
+				NodeName: testNodeName,
+				Gateway:  "10.10.10.1",
+				Prefix:   "10.10.10.0/24",
+			}}
+			Expect(k8sClient.Status().Update(ctx, p)).NotTo(HaveOccurred())
+			Eventually(func(g Gomega) {
+				g.Expect(poolManager.GetPoolByKey(
+					common.GetPoolKey("test-pool", common.PoolTypeCIDRPool))).To(
+					Equal(&pool.Pool{
+						Name:       "test-pool",
+						Subnet:     "10.10.10.0/24",
+						StartIP:    "10.10.10.1",
+						EndIP:      "10.10.10.254",
+						Gateway:    "10.10.10.1",
+						Exclusions: []pool.ExclusionRange{},
+						Routes:     []pool.Route{},
+					}))
+			}).WithTimeout(time.Second * 15).WithPolling(time.Second).Should(Succeed())
+			p.Spec.GatewayIndex = ptr.To[int32](10)
+			Expect(k8sClient.Update(ctx, p)).NotTo(HaveOccurred())
+			Eventually(func(g Gomega) {
+				g.Expect(poolManager.GetPoolByKey(common.GetPoolKey("test-pool", common.PoolTypeCIDRPool))).To(BeNil())
+			}).WithTimeout(time.Second * 15).WithPolling(time.Second).Should(Succeed())
+		})
+	})
 })