Add hacky benchmarker options
This adds additional benchmarker options, but needs cleanup

Signed-off-by: Kern Walster <[email protected]>
Kern-- committed Nov 10, 2023
1 parent 2e7a474 commit 14d8448
Showing 5 changed files with 106 additions and 32 deletions.
39 changes: 23 additions & 16 deletions benchmark/benchmarkTests.go
@@ -123,15 +123,15 @@ func SociFullRun(
pullStart := time.Now()
log.G(ctx).WithField("benchmark", "Test").WithField("event", "Start").Infof("Start Test")
log.G(ctx).WithField("benchmark", "Pull").WithField("event", "Start").Infof("Start Pull Image")
image, err := sociContainerdProc.SociRPullImageFromRegistry(ctx, imageDescriptor.ImageRef, imageDescriptor.ImageRef)
image, err := sociContainerdProc.SociRPullImageFromRegistry(ctx, imageDescriptor.ImageRef, imageDescriptor.SociIndexManifestRef)
log.G(ctx).WithField("benchmark", "Pull").WithField("event", "Stop").Infof("Stop Pull Image")
pullDuration := time.Since(pullStart)
b.ReportMetric(float64(pullDuration.Milliseconds()), "pullDuration")
if err != nil {
fatalf(b, "%s", err)
}
log.G(ctx).WithField("benchmark", "CreateContainer").WithField("event", "Start").Infof("Start Create Container")
container, cleanupContainer, err := sociContainerdProc.CreateSociContainer(ctx, image)
container, cleanupContainer, err := sociContainerdProc.CreateSociContainer(ctx, image, imageDescriptor)
log.G(ctx).WithField("benchmark", "CreateContainer").WithField("event", "Stop").Infof("Stop Create Container")
if err != nil {
fatalf(b, "%s", err)
@@ -146,15 +146,19 @@ func SociFullRun(
defer cleanupTask()
log.G(ctx).WithField("benchmark", "RunTask").WithField("event", "Start").Infof("Start Run Task")
runLazyTaskStart := time.Now()
cleanupRun, err := sociContainerdProc.RunContainerTaskForReadyLine(ctx, taskDetails, imageDescriptor.ReadyLine)
cleanupRun, err := sociContainerdProc.RunContainerTaskForReadyLine(ctx, taskDetails, imageDescriptor.ReadyLine, imageDescriptor.Timeout())
lazyTaskDuration := time.Since(runLazyTaskStart)
log.G(ctx).WithField("benchmark", "RunTask").WithField("event", "Stop").Infof("Stop Run Task")
b.ReportMetric(float64(lazyTaskDuration.Milliseconds()), "lazyTaskDuration")
if err != nil {
fatalf(b, "%s", err)
}
defer cleanupRun()
containerSecondRun, cleanupContainerSecondRun, err := sociContainerdProc.CreateSociContainer(ctx, image)
// In order for host networking to work, we need to clean up the task so that any network resources are released before running the second container
// We don't want this cleanup time included in the benchmark, though.
b.StopTimer()
cleanupRun()
b.StartTimer()
containerSecondRun, cleanupContainerSecondRun, err := sociContainerdProc.CreateSociContainer(ctx, image, imageDescriptor)
if err != nil {
fatalf(b, "%s", err)
}
@@ -166,7 +170,7 @@ func SociFullRun(
defer cleanupTaskSecondRun()
log.G(ctx).WithField("benchmark", "RunTaskTwice").WithField("event", "Start").Infof("Start Run Task Twice")
runLocalStart := time.Now()
cleanupRunSecond, err := sociContainerdProc.RunContainerTaskForReadyLine(ctx, taskDetailsSecondRun, imageDescriptor.ReadyLine)
cleanupRunSecond, err := sociContainerdProc.RunContainerTaskForReadyLine(ctx, taskDetailsSecondRun, imageDescriptor.ReadyLine, imageDescriptor.Timeout())
localTaskStats := time.Since(runLocalStart)
log.G(ctx).WithField("benchmark", "RunTaskTwice").WithField("event", "Stop").Infof("Stop Run Task Twice")
b.ReportMetric(float64(localTaskStats.Milliseconds()), "localTaskStats")
@@ -209,7 +213,7 @@ func OverlayFSFullRun(
fatalf(b, "%s", err)
}
log.G(ctx).WithField("benchmark", "CreateContainer").WithField("event", "Start").Infof("Start Create Container")
container, cleanupContainer, err := containerdProcess.CreateContainer(ctx, image)
container, cleanupContainer, err := containerdProcess.CreateContainer(ctx, imageDescriptor.ContainerOpts(image))
log.G(ctx).WithField("benchmark", "CreateContainer").WithField("event", "Stop").Infof("Stop Create Container")
if err != nil {
fatalf(b, "%s", err)
@@ -224,15 +228,19 @@ func OverlayFSFullRun(
defer cleanupTask()
log.G(ctx).WithField("benchmark", "RunTask").WithField("event", "Start").Infof("Start Run Task")
runLazyTaskStart := time.Now()
cleanupRun, err := containerdProcess.RunContainerTaskForReadyLine(ctx, taskDetails, imageDescriptor.ReadyLine)
cleanupRun, err := containerdProcess.RunContainerTaskForReadyLine(ctx, taskDetails, imageDescriptor.ReadyLine, imageDescriptor.Timeout())
lazyTaskDuration := time.Since(runLazyTaskStart)
log.G(ctx).WithField("benchmark", "RunTask").WithField("event", "Stop").Infof("Stop Run Task")
b.ReportMetric(float64(lazyTaskDuration.Milliseconds()), "lazyTaskDuration")
if err != nil {
fatalf(b, "%s", err)
}
defer cleanupRun()
containerSecondRun, cleanupContainerSecondRun, err := containerdProcess.CreateContainer(ctx, image)
// In order for host networking to work, we need to clean up the task so that any network resources are released before running the second container
// We don't want this cleanup time included in the benchmark, though.
b.StopTimer()
cleanupRun()
b.StartTimer()
containerSecondRun, cleanupContainerSecondRun, err := containerdProcess.CreateContainer(ctx, imageDescriptor.ContainerOpts(image))
if err != nil {
fatalf(b, "%s", err)
}
@@ -244,7 +252,7 @@ func OverlayFSFullRun(
defer cleanupTaskSecondRun()
log.G(ctx).WithField("benchmark", "RunTaskTwice").WithField("event", "Start").Infof("Start Run Task Twice")
runLocalStart := time.Now()
cleanupRunSecond, err := containerdProcess.RunContainerTaskForReadyLine(ctx, taskDetailsSecondRun, imageDescriptor.ReadyLine)
cleanupRunSecond, err := containerdProcess.RunContainerTaskForReadyLine(ctx, taskDetailsSecondRun, imageDescriptor.ReadyLine, imageDescriptor.Timeout())
localTaskStats := time.Since(runLocalStart)
log.G(ctx).WithField("benchmark", "RunTaskTwice").WithField("event", "Stop").Infof("Stop Run Task Twice")
b.ReportMetric(float64(localTaskStats.Milliseconds()), "localTaskStats")
@@ -259,8 +267,7 @@ func OverlayFSFullRun(
func StargzFullRun(
ctx context.Context,
b *testing.B,
imageRef string,
readyLine string,
imageDescriptor ImageDescriptor,
stargzBinary string) {
containerdProcess, err := getContainerdProcess(ctx, containerdStargzConfig)
if err != nil {
@@ -274,11 +281,11 @@ func StargzFullRun(
defer stargzProcess.StopProcess()
stargzContainerdProc := StargzContainerdProcess{containerdProcess}
b.ResetTimer()
image, err := stargzContainerdProc.StargzRpullImageFromRegistry(ctx, imageRef)
image, err := stargzContainerdProc.StargzRpullImageFromRegistry(ctx, imageDescriptor.ImageRef)
if err != nil {
fatalf(b, "%s", err)
}
container, cleanupContainer, err := stargzContainerdProc.CreateContainer(ctx, image, containerd.WithSnapshotter("stargz"))
container, cleanupContainer, err := stargzContainerdProc.CreateContainer(ctx, imageDescriptor.ContainerOpts(image, containerd.WithSnapshotter("stargz")))
if err != nil {
fatalf(b, "%s", err)
}
@@ -288,7 +295,7 @@ func StargzFullRun(
fatalf(b, "%s", err)
}
defer cleanupTask()
cleanupRun, err := containerdProcess.RunContainerTaskForReadyLine(ctx, taskDetails, readyLine)
cleanupRun, err := containerdProcess.RunContainerTaskForReadyLine(ctx, taskDetails, imageDescriptor.ReadyLine, imageDescriptor.Timeout())
if err != nil {
fatalf(b, "%s", err)
}
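
The b.StopTimer()/b.StartTimer() bracketing added around the task cleanup in SociFullRun and OverlayFSFullRun keeps the host-network teardown out of the measured durations. Below is a minimal sketch of that pattern, not part of this commit: it assumes a hypothetical phase/cleanup closure and uses testing.B's Fatalf directly instead of the benchmark's fatalf helper.

package benchmark

import "testing"

// runMeasuredPhase sketches the timing pattern above: the phase itself is
// measured, but its cleanup, which releases the task and any host-network
// resources, runs with the timer stopped so it does not inflate the duration
// reported for the following run.
func runMeasuredPhase(b *testing.B, phase func() (cleanup func(), err error)) {
	cleanup, err := phase() // measured work: pull, create container, run to the ready line
	if err != nil {
		b.Fatalf("%s", err)
	}
	b.StopTimer()  // exclude teardown from the benchmark timing
	cleanup()      // release the task and its network resources before the second run
	b.StartTimer() // resume timing for the follow-up (local) run
}
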
11 changes: 4 additions & 7 deletions benchmark/framework/containerd_utils.go
@@ -32,7 +32,6 @@ import (
"github.com/containerd/containerd/cio"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/namespaces"
"github.com/containerd/containerd/oci"
"github.com/containerd/log"
"github.com/sirupsen/logrus"
)
@@ -138,11 +137,8 @@ func (proc *ContainerdProcess) DeleteImage(ctx context.Context, imageRef string)

func (proc *ContainerdProcess) CreateContainer(
ctx context.Context,
image containerd.Image,
opts ...containerd.NewContainerOpts) (containerd.Container, func(), error) {
opts []containerd.NewContainerOpts) (containerd.Container, func(), error) {
id := fmt.Sprintf("%s-%d", testContainerID, time.Now().UnixNano())
opts = append(opts, containerd.WithNewSnapshot(id, image))
opts = append(opts, containerd.WithNewSpec(oci.WithImageConfig(image)))
container, err := proc.Client.NewContainer(
ctx,
id,
@@ -205,7 +201,8 @@ func (proc *ContainerdProcess) CreateTask(
func (proc *ContainerdProcess) RunContainerTaskForReadyLine(
ctx context.Context,
taskDetails *TaskDetails,
readyLine string) (func(), error) {
readyLine string,
timeout time.Duration) (func(), error) {
stdoutScanner := bufio.NewScanner(taskDetails.stdoutReader)
stderrScanner := bufio.NewScanner(taskDetails.stderrReader)

@@ -214,7 +211,7 @@ func (proc *ContainerdProcess) RunContainerTaskForReadyLine(
return nil, err
}
resultChannel := make(chan string, 1)
timeoutCtx, cancel := context.WithTimeout(context.Background(), 180*time.Second)
timeoutCtx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
go func() {
select {
5 changes: 3 additions & 2 deletions benchmark/soci_utils.go
@@ -156,6 +156,7 @@ func (proc *SociContainerdProcess) SociRPullImageFromRegistry(

func (proc *SociContainerdProcess) CreateSociContainer(
ctx context.Context,
image containerd.Image) (containerd.Container, func(), error) {
return proc.CreateContainer(ctx, image, containerd.WithSnapshotter("soci"))
image containerd.Image,
imageDescriptor ImageDescriptor) (containerd.Container, func(), error) {
return proc.CreateContainer(ctx, imageDescriptor.ContainerOpts(image, containerd.WithSnapshotter("soci")))
}
4 changes: 1 addition & 3 deletions benchmark/stargzTest/main.go
@@ -61,13 +61,11 @@ func main() {
var drivers []framework.BenchmarkTestDriver
for _, image := range imageList {
shortName := image.ShortName
imageRef := image.ImageRef
readyLine := image.ReadyLine
drivers = append(drivers, framework.BenchmarkTestDriver{
TestName: "StargzFullRun" + shortName,
NumberOfTests: numberOfTests,
TestFunction: func(b *testing.B) {
benchmark.StargzFullRun(ctx, b, imageRef, readyLine, stargzBinary)
benchmark.StargzFullRun(ctx, b, image, stargzBinary)
},
})
}
79 changes: 75 additions & 4 deletions benchmark/utils.go
@@ -20,16 +20,87 @@ import (
"encoding/csv"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"os/exec"
"time"

"github.com/containerd/containerd"
"github.com/containerd/containerd/contrib/nvidia"
"github.com/containerd/containerd/oci"
runtimespec "github.com/opencontainers/runtime-spec/specs-go"
)

const testContainerID = "TEST_RUN_CONTAINER"

type ImageDescriptor struct {
ShortName string `json:"short_name"`
ImageRef string `json:"image_ref"`
SociIndexManifestRef string `json:"soci_index_manifest_ref"`
ReadyLine string `json:"ready_line"`
ShortName string `json:"short_name"`
ImageRef string `json:"image_ref"`
SociIndexManifestRef string `json:"soci_index_manifest_ref"`
ReadyLine string `json:"ready_line"`
TimeoutSec int64 `json:"timeout_sec"`
ImageOptions ImageOptions `json:"options"`
}

func (i *ImageDescriptor) Timeout() time.Duration {
if i.TimeoutSec <= 0 {
return 180 * time.Second
}
return time.Duration(i.TimeoutSec) * time.Second
}

// ImageOptions contains image-specific options needed to run the tests
type ImageOptions struct {
// Net indicates the container's network mode. If set to "host", the container will use host networking; otherwise it has no networking.
Net string
// Mounts are any mounts needed by the container
Mounts []runtimespec.Mount
// Gpu indicates whether the container needs GPUs. If true, all GPUs are mounted into the container
Gpu bool
// Env is any environment variables needed by the container
Env []string
// ShmSize is the size of /dev/shm to be used inside the container
ShmSize int64
}

// ContainerOpts creates a set of NewContainerOpts from an ImageDescriptor and a containerd.Image
// The options can be used directly when launching a container
func (i *ImageDescriptor) ContainerOpts(image containerd.Image, o ...containerd.NewContainerOpts) []containerd.NewContainerOpts {
var opts []containerd.NewContainerOpts
var ociOpts []oci.SpecOpts

opts = append(opts, o...)
id := fmt.Sprintf("%s-%d", testContainerID, time.Now().UnixNano())
opts = append(opts, containerd.WithNewSnapshot(id, image))
ociOpts = append(ociOpts, oci.WithImageConfig(image))
if len(i.ImageOptions.Mounts) > 0 {
ociOpts = append(ociOpts, oci.WithMounts(i.ImageOptions.Mounts))
}
if i.ImageOptions.Gpu {
ociOpts = append(ociOpts, nvidia.WithGPUs(nvidia.WithAllDevices, nvidia.WithAllCapabilities))
}
if len(i.ImageOptions.Env) > 0 {
ociOpts = append(ociOpts, oci.WithEnv(i.ImageOptions.Env))
}
if i.ImageOptions.ShmSize > 0 {
ociOpts = append(ociOpts, oci.WithDevShmSize(i.ImageOptions.ShmSize))
}
if i.ImageOptions.Net == "host" {
hostname, err := os.Hostname()
if err != nil {
panic(fmt.Errorf("get hostname: %w", err))
}
ociOpts = append(ociOpts,
oci.WithHostNamespace(runtimespec.NetworkNamespace),
oci.WithHostHostsFile,
oci.WithHostResolvconf,
oci.WithEnv([]string{fmt.Sprintf("HOSTNAME=%s", hostname)}),
)
}

opts = append(opts, containerd.WithNewSpec(ociOpts...))
return opts
}

func GetImageList(file string) ([]ImageDescriptor, error) {
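
The new ImageOptions travel from the image list JSON through ImageDescriptor.ContainerOpts into the OCI spec. A minimal usage sketch follows; it is an assumption, not part of this commit: it presumes it compiles in the same package as ImageDescriptor, and the image reference, ready line, mount, and environment values are placeholders.

package benchmark

import (
	"github.com/containerd/containerd"
	runtimespec "github.com/opencontainers/runtime-spec/specs-go"
)

func exampleContainerOpts(image containerd.Image) []containerd.NewContainerOpts {
	desc := ImageDescriptor{
		ShortName:  "example",
		ImageRef:   "registry.example.com/app:latest", // placeholder reference
		ReadyLine:  "server started",                  // line that marks the workload as ready
		TimeoutSec: 300,                               // Timeout() falls back to 180s when this is unset
		ImageOptions: ImageOptions{
			Net:     "host",                     // host network namespace, hosts file, resolv.conf
			Env:     []string{"MODE=benchmark"}, // extra environment variables
			ShmSize: 2 << 30,                    // 2 GiB /dev/shm
			Mounts: []runtimespec.Mount{{
				Destination: "/data",
				Type:        "bind",
				Source:      "/tmp/data",
				Options:     []string{"rbind", "rw"},
			}},
		},
	}
	// ContainerOpts appends a fresh snapshot and an OCI spec built from the
	// options above; extra NewContainerOpts, such as the snapshotter, pass through.
	return desc.ContainerOpts(image, containerd.WithSnapshotter("soci"))
}
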
