refactor(env): split big function into smaller chunks

criyle 2025-05-28 22:17:41 -04:00
parent ac3b4183fd
commit 033790c1c0
3 changed files with 466 additions and 344 deletions

env/env_cgroup_linux.go vendored (new file, +160)

@@ -0,0 +1,160 @@
package env

import (
	"context"
	"fmt"
	"os"

	"github.com/coreos/go-systemd/v22/dbus"
	"github.com/criyle/go-judge/env/linuxcontainer"
	"github.com/criyle/go-sandbox/pkg/cgroup"
	ddbus "github.com/godbus/dbus/v5"
	"go.uber.org/zap"
)

func setupCgroup(c Config, logger *zap.Logger) (cgroup.Cgroup, *cgroup.Controllers, error) {
	prefix := c.CgroupPrefix
	t := cgroup.DetectedCgroupType
	ct, err := cgroup.GetAvailableController()
	if err != nil {
		logger.Error("failed to get available controllers", zap.Error(err))
		return nil, nil, err
	}
	if t == cgroup.TypeV2 {
		prefix, ct, err = setupCgroupV2(prefix, logger)
		if err != nil {
			return nil, nil, err
		}
	}
	return createAndNestCgroup(prefix, ct, logger)
}

func setupCgroupV2(prefix string, logger *zap.Logger) (string, *cgroup.Controllers, error) {
	logger.Info("running with cgroup v2, connecting systemd dbus to create cgroup")
	conn, err := getSystemdConnection()
	if err != nil {
		logger.Info("connecting to systemd dbus failed, assuming running in container, enable cgroup v2 nesting support and take control of the whole cgroupfs", zap.Error(err))
		return "", getControllersWithPrefix("", logger), nil
	}
	defer conn.Close()
	scopeName := prefix + ".scope"
	logger.Info("connected to systemd bus, attempting to create transient unit", zap.String("scopeName", scopeName))
	if err := startTransientUnit(conn, scopeName, logger); err != nil {
		return "", nil, err
	}
	scopeName, err = cgroup.GetCurrentCgroupPrefix()
	if err != nil {
		logger.Error("failed to get current cgroup prefix", zap.Error(err))
		return "", nil, err
	}
	logger.Info("current cgroup", zap.String("scope_name", scopeName))
	ct, err := cgroup.GetAvailableControllerWithPrefix(scopeName)
	if err != nil {
		logger.Error("failed to get available controller with prefix", zap.Error(err))
		return "", nil, err
	}
	return scopeName, ct, nil
}

func getSystemdConnection() (*dbus.Conn, error) {
	if os.Getuid() == 0 {
		return dbus.NewSystemConnectionContext(context.TODO())
	}
	return dbus.NewUserConnectionContext(context.TODO())
}

func startTransientUnit(conn *dbus.Conn, scopeName string, logger *zap.Logger) error {
	properties := []dbus.Property{
		dbus.PropDescription("go judge - a high performance sandbox service base on container technologies"),
		dbus.PropWants(scopeName),
		dbus.PropPids(uint32(os.Getpid())),
		newSystemdProperty("Delegate", true),
	}
	ch := make(chan string, 1)
	if _, err := conn.StartTransientUnitContext(context.TODO(), scopeName, "replace", properties, ch); err != nil {
		logger.Error("failed to start transient unit", zap.Error(err))
		return fmt.Errorf("failed to start transient unit: %w", err)
	}
	s := <-ch
	if s != "done" {
		logger.Error("starting transient unit returns error", zap.String("status", s))
		return fmt.Errorf("starting transient unit returns error: %s", s)
	}
	return nil
}

func getControllersWithPrefix(prefix string, logger *zap.Logger) *cgroup.Controllers {
	ct, err := cgroup.GetAvailableControllerWithPrefix(prefix)
	if err != nil {
		logger.Error("failed to get available controller with prefix", zap.Error(err))
		return nil
	}
	return ct
}

func createAndNestCgroup(prefix string, ct *cgroup.Controllers, logger *zap.Logger) (cgroup.Cgroup, *cgroup.Controllers, error) {
	cgb, err := cgroup.New(prefix, ct)
	if err != nil {
		if os.Getuid() == 0 {
			logger.Error("failed to create cgroup", zap.String("prefix", prefix), zap.Error(err))
			return nil, nil, err
		}
		logger.Warn("not running in root and have no permission on cgroup, falling back to rlimit / rusage mode", zap.Error(err))
		return nil, nil, nil
	}
	logger.Info("creating nesting api cgroup", zap.Any("cgroup", cgb))
	if _, err = cgb.Nest("api"); err != nil {
		if os.Getuid() != 0 {
			logger.Warn("creating api cgroup with error, falling back to rlimit / rusage mode", zap.Error(err))
			cgb.Destroy()
			return nil, nil, nil
		}
	}
	logger.Info("creating containers cgroup")
	cg, err := cgb.New("containers")
	if err != nil {
		logger.Warn("creating containers cgroup with error, falling back to rlimit / rusage mode", zap.Error(err))
		return nil, nil, nil
	}
	if ct != nil && !ct.Memory {
		logger.Warn("memory cgroup is not enabled, falling back to rlimit / rusage mode")
	}
	if ct != nil && !ct.Pids {
		logger.Warn("pid cgroup is not enabled, proc limit does not have effect")
	}
	return cg, ct, nil
}

func prepareCgroupPool(cgb cgroup.Cgroup, c Config) linuxcontainer.CgroupPool {
	if cgb != nil {
		return linuxcontainer.NewFakeCgroupPool(cgb, c.CPUCfsPeriod)
	}
	return nil
}

func getCgroupInfo(cgb cgroup.Cgroup, ct *cgroup.Controllers) (int, []string) {
	cgroupType := int(cgroup.DetectedCgroupType)
	if cgb == nil {
		cgroupType = 0
	}
	cgroupControllers := []string{}
	if ct != nil {
		cgroupControllers = ct.Names()
	}
	return cgroupType, cgroupControllers
}

func newSystemdProperty(name string, units any) dbus.Property {
	return dbus.Property{
		Name:  name,
		Value: ddbus.MakeVariant(units),
	}
}
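Editor's note: the systemd interaction in setupCgroupV2 boils down to starting a transient scope with Delegate=true so the process gains control over its own cgroup v2 subtree. A minimal standalone sketch of that pattern, assuming the same go-systemd v22 dbus package as above (the unit name and description here are placeholders, not from the commit):

package main

import (
	"context"
	"fmt"
	"os"

	"github.com/coreos/go-systemd/v22/dbus"
	ddbus "github.com/godbus/dbus/v5"
)

func main() {
	// connect to the user bus when unprivileged, mirroring getSystemdConnection
	conn, err := dbus.NewUserConnectionContext(context.TODO())
	if err != nil {
		panic(err) // in the real code this triggers the "assume running in container" fallback
	}
	defer conn.Close()

	props := []dbus.Property{
		dbus.PropDescription("demo transient scope"),       // placeholder description
		dbus.PropPids(uint32(os.Getpid())),                 // move this process into the scope
		{Name: "Delegate", Value: ddbus.MakeVariant(true)}, // delegate cgroup control to us
	}
	ch := make(chan string, 1)
	if _, err := conn.StartTransientUnitContext(context.TODO(), "demo.scope", "replace", props, ch); err != nil {
		panic(err)
	}
	fmt.Println("job result:", <-ch) // "done" on success
}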

env/env_linux.go vendored (442 lines changed)

@@ -7,7 +7,6 @@ import (
 	"sync/atomic"
 	"syscall"
 
-	"github.com/coreos/go-systemd/v22/dbus"
 	"github.com/criyle/go-judge/env/linuxcontainer"
 	"github.com/criyle/go-judge/env/pool"
 	"github.com/criyle/go-judge/envexec"
@@ -16,7 +15,6 @@ import (
 	"github.com/criyle/go-sandbox/pkg/forkexec"
 	"github.com/criyle/go-sandbox/pkg/mount"
 	"github.com/criyle/go-sandbox/runner"
-	ddbus "github.com/godbus/dbus/v5"
 	"github.com/google/shlex"
 	"go.uber.org/zap"
 	"golang.org/x/sys/unix"
@@ -31,185 +29,65 @@ const (
 
 // NewBuilder build a environment builder
 func NewBuilder(c Config, logger *zap.Logger) (pool.EnvBuilder, map[string]any, error) {
-	var (
-		mountBuilder  *mount.Builder
-		symbolicLinks []container.SymbolicLink
-		maskPaths     []string
-		unshareCgroup bool = true
-	)
-	mc, err := readMountConfig(c.MountConf)
-	if err != nil {
-		if !os.IsNotExist(err) {
-			logger.Error("failed to read mount config", zap.String("path", c.MountConf), zap.Error(err))
-			return nil, nil, err
-		}
-		logger.Info("mount.yaml does not exist, using default container mount", zap.String("path", c.MountConf))
-		mountBuilder = getDefaultMount(c.TmpFsParam)
-	} else {
-		mountBuilder, err = parseMountConfig(mc)
-		if err != nil {
-			logger.Error("failed to parse mount config", zap.Error(err))
-			return nil, nil, err
-		}
-	}
-	if mc != nil && len(mc.SymLinks) > 0 {
-		symbolicLinks = make([]container.SymbolicLink, 0, len(mc.SymLinks))
-		for _, l := range mc.SymLinks {
-			symbolicLinks = append(symbolicLinks, container.SymbolicLink{LinkPath: l.LinkPath, Target: l.Target})
-		}
-	} else {
-		symbolicLinks = defaultSymLinks
-	}
-	if mc != nil && len(mc.MaskPaths) > 0 {
-		maskPaths = mc.MaskPaths
-	} else {
-		maskPaths = defaultMaskPaths
+	mountsConfig, mountBuilder, symbolicLinks, maskPaths, err := prepareMountAndPaths(c, logger)
+	if err != nil {
+		return nil, nil, err
 	}
 	m := mountBuilder.FilterNotExist().Mounts
-	logger.Info("created container mount", zap.Any("mountBuilder", mountBuilder))
-	seccomp, err := readSeccompConf(c.SeccompConf)
+	seccomp, err := prepareSeccomp(c, logger)
 	if err != nil {
-		logger.Error("failed to load seccomp config", zap.String("path", c.SeccompConf), zap.Error(err))
-		return nil, nil, fmt.Errorf("failed to load seccomp config: %w", err)
+		return nil, nil, err
 	}
-	if seccomp != nil {
-		logger.Info("loaded seccomp filter", zap.String("path", c.SeccompConf))
-	}
-	unshareFlags := uintptr(forkexec.UnshareFlags)
-	if c.NetShare {
-		unshareFlags ^= syscall.CLONE_NEWNET
-	}
-	major, minor := kernelVersion()
-	unshareFlags ^= unix.CLONE_NEWCGROUP
-	if major < 4 || (major == 4 && minor < 6) {
-		unshareCgroup = false
-		logger.Info("kernel version < 4.6, don't unshare cgroup", zap.Int("major", major), zap.Int("minor", minor))
-	}
-	// use setuid container only if running in root privilege
-	var credGen container.CredGenerator
-	if os.Getuid() == 0 && c.ContainerCredStart > 0 {
-		credGen = newCredGen(uint32(c.ContainerCredStart))
-	}
-	hostName := containerName
-	domainName := containerName
-	workDir := defaultWorkDir
-	cUID := containerCred
-	cGID := containerCred
-	var initCmd []string
-	if mc != nil {
-		hostName = mc.HostName
-		domainName = mc.DomainName
-		workDir = mc.WorkDir
-		cUID = mc.UID
-		cGID = mc.GID
-		if mc.InitCmd != "" {
-			initCmd, err = shlex.Split(mc.InitCmd)
-			if err != nil {
-				logger.Error("failed to parse init_cmd", zap.String("init_cmd", mc.InitCmd), zap.Error(err))
-				return nil, nil, fmt.Errorf("failed to parse initCmd: %s %w", mc.InitCmd, err)
-			}
-			logger.Info("initialize container with command", zap.String("init_cmd", mc.InitCmd))
-		}
-	}
-	logger.Info("creating container builder",
-		zap.String("host_name", hostName),
-		zap.String("domain_name", domainName),
-		zap.String("work_dir", workDir),
-	)
-	b := &container.Builder{
-		TmpRoot:                 "go-judge",
-		Mounts:                  m,
-		SymbolicLinks:           symbolicLinks,
-		MaskPaths:               maskPaths,
-		CredGenerator:           credGen,
-		Stderr:                  os.Stderr,
-		CloneFlags:              unshareFlags,
-		ExecFile:                c.ContainerInitPath,
-		HostName:                hostName,
-		DomainName:              domainName,
-		InitCommand:             initCmd,
-		WorkDir:                 workDir,
-		ContainerUID:            cUID,
-		ContainerGID:            cGID,
-		UnshareCgroupBeforeExec: unshareCgroup,
-	}
-	cgb, ct, err := newCgroup(c, logger)
+	unshareFlags, unshareCgroup := prepareUnshareFlags(c, logger)
+	credGen := prepareCredGen(c)
+	hostName, domainName, workDir, cUID, cGID, initCmd, err := prepareContainerMeta(mountsConfig, logger)
+	if err != nil {
+		return nil, nil, err
+	}
+	b := &container.Builder{
+		TmpRoot:                 "go-judge",
+		Mounts:                  m,
+		SymbolicLinks:           symbolicLinks,
+		MaskPaths:               maskPaths,
+		CredGenerator:           credGen,
+		Stderr:                  os.Stderr,
+		CloneFlags:              unshareFlags,
+		ExecFile:                c.ContainerInitPath,
+		HostName:                hostName,
+		DomainName:              domainName,
+		InitCommand:             initCmd,
+		WorkDir:                 workDir,
+		ContainerUID:            cUID,
+		ContainerGID:            cGID,
+		UnshareCgroupBeforeExec: unshareCgroup,
+	}
+	cgb, ct, err := setupCgroup(c, logger)
 	if err != nil {
 		return nil, nil, err
 	}
-	var cgroupPool linuxcontainer.CgroupPool
-	if cgb != nil {
-		cgroupPool = linuxcontainer.NewFakeCgroupPool(cgb, c.CPUCfsPeriod)
-	}
-	cgroupType := int(cgroup.DetectedCgroupType)
-	if cgb == nil {
-		cgroupType = 0
-	}
-	cgroupControllers := []string{}
-	if ct != nil {
-		cgroupControllers = ct.Names()
-	}
-	conf := map[string]any{
-		"cgroupType":        cgroupType,
-		"mount":             m,
-		"symbolicLink":      symbolicLinks,
-		"maskedPaths":       maskPaths,
-		"hostName":          hostName,
-		"domainName":        domainName,
-		"workDir":           workDir,
-		"uid":               cUID,
-		"gid":               cGID,
+	cgroupPool := prepareCgroupPool(cgb, c)
+	cgroupType, cgroupControllers := getCgroupInfo(cgb, ct)
+	conf := map[string]any{
+		"cgroupType":        cgroupType,
+		"mount":             m,
+		"symbolicLink":      symbolicLinks,
+		"maskedPaths":       maskPaths,
+		"hostName":          hostName,
+		"domainName":        domainName,
+		"workDir":           workDir,
+		"uid":               cUID,
+		"gid":               cGID,
 		"cgroupControllers": cgroupControllers,
 	}
-	if cgb != nil && cgroupType == cgroup.TypeV2 && (major > 5 || major == 5 && minor >= 7) {
-		logger.Info("running kernel >= 5.7 with cgroup V2, trying faster clone3(CLONE_INTO_CGROUP)",
-			zap.Int("major", major), zap.Int("minor", minor))
-		if b := func() pool.EnvBuilder {
-			b := linuxcontainer.NewEnvBuilder(linuxcontainer.Config{
-				Builder:    b,
-				CgroupPool: cgroupPool,
-				WorkDir:    workDir,
-				Cpuset:     c.Cpuset,
-				CPURate:    c.EnableCPURate,
-				Seccomp:    seccomp,
-				CgroupFd:   true,
-			})
-			e, err := b.Build()
-			if err != nil {
-				logger.Info("environment build failed", zap.Error(err))
-				return nil
-			}
-			defer e.Destroy()
-			p, err := e.Execve(context.TODO(), envexec.ExecveParam{
-				Args: []string{"/usr/bin/env"},
-				Limit: envexec.Limit{
-					Memory: 256 << 20,
-					Proc:   1,
-				},
-			})
-			if err != nil {
-				logger.Info("environment run failed", zap.Error(err))
-				return nil
-			}
-			<-p.Done()
-			r := p.Result()
-			if r.Status == runner.StatusRunnerError {
-				logger.Info("environment result failed", zap.Stringer("result", r))
-				return nil
-			}
-			return b
-		}(); b != nil {
-			conf["clone3"] = true
-			return b, conf, nil
-		}
+	if tryClone3Builder := tryClone3(c, b, cgb, cgroupType, cgroupPool, workDir, seccomp, logger); tryClone3Builder != nil {
+		conf["clone3"] = true
+		return tryClone3Builder, conf, nil
 	}
 	return linuxcontainer.NewEnvBuilder(linuxcontainer.Config{
@@ -222,99 +100,163 @@ func NewBuilder(c Config, logger *zap.Logger) (pool.EnvBuilder, map[string]any,
 	}), conf, nil
 }
 
-func newCgroup(c Config, logger *zap.Logger) (cgroup.Cgroup, *cgroup.Controllers, error) {
-	prefix := c.CgroupPrefix
-	t := cgroup.DetectedCgroupType
-	ct, err := cgroup.GetAvailableController()
-	if err != nil {
-		logger.Error("failed to get available controllers", zap.Error(err))
-		return nil, nil, err
-	}
-	if t == cgroup.TypeV2 {
-		logger.Info("running with cgroup v2, connecting systemd dbus to create cgroup")
-		var conn *dbus.Conn
-		if os.Getuid() == 0 {
-			conn, err = dbus.NewSystemConnectionContext(context.TODO())
-		} else {
-			conn, err = dbus.NewUserConnectionContext(context.TODO())
-		}
-		if err != nil {
-			logger.Info("connecting to systemd dbus failed, assuming running in container, enable cgroup v2 nesting support and take control of the whole cgroupfs", zap.Error(err))
-			prefix = ""
-		} else {
-			defer conn.Close()
-			scopeName := c.CgroupPrefix + ".scope"
-			logger.Info("connected to systemd bus, attempting to create transient unit", zap.String("scopeName", scopeName))
-			properties := []dbus.Property{
-				dbus.PropDescription("go judge - a high performance sandbox service base on container technologies"),
-				dbus.PropWants(scopeName),
-				dbus.PropPids(uint32(os.Getpid())),
-				newSystemdProperty("Delegate", true),
-			}
-			ch := make(chan string, 1)
-			if _, err := conn.StartTransientUnitContext(context.TODO(), scopeName, "replace", properties, ch); err != nil {
-				logger.Error("failed to start transient unit", zap.Error(err))
-				return nil, nil, fmt.Errorf("failed to start transient unit: %w", err)
-			}
-			s := <-ch
-			if s != "done" {
-				logger.Error("starting transient unit returns error", zap.String("status", s), zap.Error(err))
-				return nil, nil, fmt.Errorf("starting transient unit returns error: %w", err)
-			}
-			scopeName, err := cgroup.GetCurrentCgroupPrefix()
-			if err != nil {
-				logger.Error("failed to get current cgroup prefix", zap.Error(err))
-				return nil, nil, err
-			}
-			logger.Info("current cgroup", zap.String("scope_name", scopeName))
-			prefix = scopeName
-			ct, err = cgroup.GetAvailableControllerWithPrefix(prefix)
-			if err != nil {
-				logger.Error("failed to get available controller with prefix", zap.Error(err))
-				return nil, nil, err
-			}
-		}
-	}
-	cgb, err := cgroup.New(prefix, ct)
-	if err != nil {
-		if os.Getuid() == 0 {
-			logger.Error("failed to create cgroup", zap.String("prefix", prefix), zap.Error(err))
-			return nil, nil, err
-		}
-		logger.Warn("not running in root and have no permission on cgroup, falling back to rlimit / rusage mode", zap.Error(err))
-		return nil, nil, nil
-	}
-	logger.Info("creating nesting api cgroup", zap.Any("cgroup", cgb))
-	if _, err = cgb.Nest("api"); err != nil {
-		if os.Getuid() != 0 {
-			logger.Warn("creating api cgroup with error, falling back to rlimit / rusage mode", zap.Error(err))
-			cgb.Destroy()
-			return nil, nil, nil
-		}
-	}
-	logger.Info("creating containers cgroup")
-	cg, err := cgb.New("containers")
-	if err != nil {
-		logger.Warn("creating containers cgroup with error, falling back to rlimit / rusage mode", zap.Error(err))
-		cgb = nil
-	}
-	if !ct.Memory {
-		logger.Warn("memory cgroup is not enabled, falling back to rlimit / rusage mode")
-	}
-	if !ct.Pids {
-		logger.Warn("pid cgroup is not enabled, proc limit does not have effect")
-	}
-	return cg, ct, nil
-}
-
-func newSystemdProperty(name string, units any) dbus.Property {
-	return dbus.Property{
-		Name:  name,
-		Value: ddbus.MakeVariant(units),
-	}
+func prepareMountAndPaths(c Config, logger *zap.Logger) (*Mounts, *mount.Builder, []container.SymbolicLink, []string, error) {
+	mc, err := readMountConfig(c.MountConf)
+	if err != nil {
+		if !os.IsNotExist(err) {
+			logger.Error("failed to read mount config", zap.String("path", c.MountConf), zap.Error(err))
+			return nil, nil, nil, nil, err
+		}
+		logger.Info("mount.yaml does not exist, using default container mount", zap.String("path", c.MountConf))
+		return nil, getDefaultMount(c.TmpFsParam), defaultSymLinks, defaultMaskPaths, nil
+	}
+	mountBuilder, err := parseMountConfig(mc)
+	if err != nil {
+		logger.Error("failed to parse mount config", zap.Error(err))
+		return nil, nil, nil, nil, err
+	}
+	var symbolicLinks []container.SymbolicLink
+	if len(mc.SymLinks) > 0 {
+		symbolicLinks = make([]container.SymbolicLink, 0, len(mc.SymLinks))
+		for _, l := range mc.SymLinks {
+			symbolicLinks = append(symbolicLinks, container.SymbolicLink{LinkPath: l.LinkPath, Target: l.Target})
+		}
+	} else {
+		symbolicLinks = defaultSymLinks
+	}
+	maskPaths := defaultMaskPaths
+	if len(mc.MaskPaths) > 0 {
+		maskPaths = mc.MaskPaths
+	}
+	logger.Info("created container mount", zap.Any("mountBuilder", mountBuilder))
+	return mc, mountBuilder, symbolicLinks, maskPaths, nil
+}
+
+func prepareSeccomp(c Config, logger *zap.Logger) ([]syscall.SockFilter, error) {
+	seccomp, err := readSeccompConf(c.SeccompConf)
+	if err != nil {
+		logger.Error("failed to load seccomp config", zap.String("path", c.SeccompConf), zap.Error(err))
+		return nil, fmt.Errorf("failed to load seccomp config: %w", err)
+	}
+	if seccomp != nil {
+		logger.Info("loaded seccomp filter", zap.String("path", c.SeccompConf))
+	}
+	return seccomp, nil
+}
+
+func prepareUnshareFlags(c Config, logger *zap.Logger) (uintptr, bool) {
+	unshareFlags := uintptr(forkexec.UnshareFlags)
+	if c.NetShare {
+		unshareFlags ^= syscall.CLONE_NEWNET
+	}
+	major, minor := kernelVersion()
+	unshareFlags ^= unix.CLONE_NEWCGROUP
+	unshareCgroup := true
+	if major < 4 || (major == 4 && minor < 6) {
+		unshareCgroup = false
+		logger.Info("kernel version < 4.6, don't unshare cgroup", zap.Int("major", major), zap.Int("minor", minor))
+	}
+	return unshareFlags, unshareCgroup
+}
+
+func prepareCredGen(c Config) container.CredGenerator {
+	if os.Getuid() == 0 && c.ContainerCredStart > 0 {
+		return newCredGen(uint32(c.ContainerCredStart))
+	}
+	return nil
+}
+
+func prepareContainerMeta(mc *Mounts, logger *zap.Logger) (hostName, domainName, workDir string, cUID, cGID int, initCmd []string, err error) {
+	hostName = containerName
+	domainName = containerName
+	workDir = defaultWorkDir
+	cUID = containerCred
+	cGID = containerCred
+	if mc != nil {
+		if mc.HostName != "" {
+			hostName = mc.HostName
+		}
+		if mc.DomainName != "" {
+			domainName = mc.DomainName
+		}
+		if mc.WorkDir != "" {
+			workDir = mc.WorkDir
+		}
+		if mc.UID != 0 {
+			cUID = mc.UID
+		}
+		if mc.GID != 0 {
+			cGID = mc.GID
+		}
+		if mc.InitCmd != "" {
+			initCmd, err = shlex.Split(mc.InitCmd)
+			if err != nil {
+				logger.Error("failed to parse init_cmd", zap.String("init_cmd", mc.InitCmd), zap.Error(err))
+				err = fmt.Errorf("failed to parse initCmd: %s %w", mc.InitCmd, err)
+				return
+			}
+			logger.Info("initialize container with command", zap.String("init_cmd", mc.InitCmd))
+		}
+	}
+	logger.Info("creating container builder",
+		zap.String("host_name", hostName),
+		zap.String("domain_name", domainName),
+		zap.String("work_dir", workDir),
+	)
+	return
+}
+
+func tryClone3(
+	c Config,
+	envBuilder linuxcontainer.EnvironmentBuilder,
+	cgb cgroup.Cgroup,
+	cgroupType int,
+	cgroupPool linuxcontainer.CgroupPool,
+	workDir string,
+	seccomp []syscall.SockFilter,
+	logger *zap.Logger,
+) pool.EnvBuilder {
+	major, minor := kernelVersion()
+	if cgb == nil || cgroupType != cgroup.TypeV2 || (major < 5 || (major == 5 && minor < 7)) {
+		return nil
+	}
+	logger.Info("running kernel >= 5.7 with cgroup V2, trying faster clone3(CLONE_INTO_CGROUP)",
+		zap.Int("major", major), zap.Int("minor", minor))
+	b := linuxcontainer.NewEnvBuilder(linuxcontainer.Config{
+		Builder:    envBuilder,
+		CgroupPool: cgroupPool,
+		WorkDir:    workDir,
+		Cpuset:     c.Cpuset,
+		CPURate:    c.EnableCPURate,
+		Seccomp:    seccomp,
+		CgroupFd:   true,
+	})
+	e, err := b.Build()
+	if err != nil {
+		logger.Info("environment build failed", zap.Error(err))
+		return nil
+	}
+	defer e.Destroy()
+	p, err := e.Execve(context.TODO(), envexec.ExecveParam{
+		Args: []string{"/usr/bin/env"},
+		Limit: envexec.Limit{
+			Memory: 256 << 20,
+			Proc:   1,
+		},
+	})
+	if err != nil {
+		logger.Info("environment run failed", zap.Error(err))
+		return nil
+	}
+	<-p.Done()
+	r := p.Result()
+	if r.Status == runner.StatusRunnerError {
+		logger.Info("environment result failed", zap.Stringer("result", r))
+		return nil
+	}
+	return b
 }
 
 type credGen struct {
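Editor's note: the refactor keeps NewBuilder's public signature, so existing callers are unaffected. A usage sketch under stated assumptions (the env import path follows the subpackage paths in this diff; only Config fields referenced by the diff are set, everything else is left at its zero value):

package main

import (
	"github.com/criyle/go-judge/env"
	"github.com/criyle/go-judge/env/pool"

	"go.uber.org/zap"
)

func main() {
	logger, _ := zap.NewProduction()

	b, conf, err := env.NewBuilder(env.Config{
		MountConf:    "mount.yaml",   // falls back to the default mounts when the file is absent
		SeccompConf:  "seccomp.yaml", // optional seccomp filter
		CgroupPrefix: "go-judge",     // becomes "go-judge.scope" when systemd creates the cgroup
	}, logger)
	if err != nil {
		logger.Fatal("failed to create environment builder", zap.Error(err))
	}
	var _ pool.EnvBuilder = b                           // NewBuilder returns a pool.EnvBuilder
	logger.Info("builder ready", zap.Any("conf", conf)) // conf carries "cgroupType", possibly "clone3"
}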


@@ -30,107 +30,19 @@ func copyOutAndCollect(m Environment, c *Cmd, ptc []pipeCollector, newStoreFile
 		fileError = append(fileError, e)
 	}
 
-	// copy out
+	// copy out files
 	for _, n := range c.CopyOut {
 		n := n
-		g.Go(func() (err error) {
-			t := ErrCopyOutOpen
-			defer func() {
-				if err != nil {
-					addError(FileError{
-						Name:    n.Name,
-						Type:    t,
-						Message: err.Error(),
-					})
-				}
-			}()
-			cf, err := m.Open(n.Name, os.O_RDONLY, 0777)
-			if err != nil {
-				if errors.Is(err, os.ErrNotExist) && n.Optional {
-					return nil
-				}
-				return err
-			}
-			defer cf.Close()
-			stat, err := cf.Stat()
-			if err != nil {
-				return fmt.Errorf("copyout: stat %q: %w", n.Name, err)
-			}
-			// check regular file
-			if stat.Mode()&os.ModeType != 0 {
-				t = ErrCopyOutNotRegularFile
-				return fmt.Errorf("copyout: %q is not a regular file: %v", n.Name, stat.Mode())
-			}
-			// check size limit
-			s := stat.Size()
-			if c.CopyOutMax > 0 && s > int64(c.CopyOutMax) {
-				t = ErrCopyOutSizeExceeded
-				return fmt.Errorf("copyout: %q size (%d) exceeds limit (%d)", n.Name, s, c.CopyOutMax)
-			}
-			// create store file
-			buf, err := newStoreFile()
-			if err != nil {
-				t = ErrCopyOutCreateFile
-				return fmt.Errorf("copyout: failed to create store file for %q: %w", n.Name, err)
-			}
-			// Ensure not copy over file size
-			_, err = buf.ReadFrom(io.LimitReader(cf, s))
-			if err != nil {
-				t = ErrCopyOutCopyContent
-				buf.Close()
-				return fmt.Errorf("copyout: failed to copy content for %q: %w", n.Name, err)
-			}
-			put(buf, n.Name)
-			return nil
+		g.Go(func() error {
+			return copyOutFile(m, c, n, newStoreFile, put, addError)
 		})
 	}
 
-	// collect pipe
+	// collect pipes
 	for _, p := range ptc {
 		p := p
-		g.Go(func() (err error) {
-			errType := ErrCopyOutOpen
-			defer func() {
-				if err != nil {
-					addError(FileError{
-						Name:    p.name,
-						Type:    errType,
-						Message: err.Error(),
-					})
-				}
-			}()
-			<-p.done
-			if p.storage {
-				put(p.buffer, p.name)
-				if fi, err := p.buffer.Stat(); err == nil && fi.Size() > int64(p.limit) {
-					p.buffer.Truncate(int64(p.limit) + 1)
-					errType = ErrCollectSizeExceeded
-					return runner.StatusOutputLimitExceeded
-				}
-			} else {
-				defer p.buffer.Close()
-				buf, err := newStoreFile()
-				if err != nil {
-					errType = ErrCopyOutCreateFile
-					return fmt.Errorf("collect: failed to create store file for %q: %w", p.name, err)
-				}
-				// Ensure not copy over file size
-				_, err = buf.ReadFrom(io.LimitReader(p.buffer, int64(p.limit)+1))
-				if err != nil {
-					errType = ErrCopyOutCopyContent
-					buf.Close()
-					return fmt.Errorf("collect: failed to copy content for %q: %w", p.name, err)
-				}
-				put(buf, p.name)
-				if fi, err := p.buffer.Stat(); err == nil && fi.Size() > int64(p.limit) {
-					errType = ErrCollectSizeExceeded
-					return runner.StatusOutputLimitExceeded
-				}
-			}
-			return nil
+		g.Go(func() error {
+			return collectPipe(p, newStoreFile, put, addError)
 		})
 	}
@@ -144,3 +56,111 @@ func copyOutAndCollect(m Environment, c *Cmd, ptc []pipeCollector, newStoreFile
 	err := g.Wait()
 	return rt, fileError, err
 }
+
+func copyOutFile(
+	m Environment,
+	c *Cmd,
+	n CmdCopyOutFile,
+	newStoreFile NewStoreFile,
+	put func(*os.File, string),
+	addError func(FileError),
+) (err error) {
+	t := ErrCopyOutOpen
+	defer func() {
+		if err != nil {
+			addError(FileError{
+				Name:    n.Name,
+				Type:    t,
+				Message: err.Error(),
+			})
+		}
+	}()
+	cf, err := m.Open(n.Name, os.O_RDONLY, 0777)
+	if err != nil {
+		if errors.Is(err, os.ErrNotExist) && n.Optional {
+			return nil
+		}
+		return err
+	}
+	defer cf.Close()
+	stat, err := cf.Stat()
+	if err != nil {
+		return fmt.Errorf("copyout: stat %q: %w", n.Name, err)
+	}
+	// check regular file
+	if stat.Mode()&os.ModeType != 0 {
+		t = ErrCopyOutNotRegularFile
+		return fmt.Errorf("copyout: %q is not a regular file: %v", n.Name, stat.Mode())
+	}
+	// check size limit
+	s := stat.Size()
+	if c.CopyOutMax > 0 && s > int64(c.CopyOutMax) {
+		t = ErrCopyOutSizeExceeded
+		return fmt.Errorf("copyout: %q size (%d) exceeds limit (%d)", n.Name, s, c.CopyOutMax)
+	}
+	// create store file
+	buf, err := newStoreFile()
+	if err != nil {
+		t = ErrCopyOutCreateFile
+		return fmt.Errorf("copyout: failed to create store file for %q: %w", n.Name, err)
+	}
+	// Ensure not copy over file size
+	_, err = buf.ReadFrom(io.LimitReader(cf, s))
+	if err != nil {
+		t = ErrCopyOutCopyContent
+		buf.Close()
+		return fmt.Errorf("copyout: failed to copy content for %q: %w", n.Name, err)
+	}
+	put(buf, n.Name)
+	return nil
+}
+
+func collectPipe(
+	p pipeCollector,
+	newStoreFile NewStoreFile,
+	put func(*os.File, string),
+	addError func(FileError),
+) (err error) {
+	errType := ErrCopyOutOpen
+	defer func() {
+		if err != nil {
+			addError(FileError{
+				Name:    p.name,
+				Type:    errType,
+				Message: err.Error(),
+			})
+		}
+	}()
+	<-p.done
+	if p.storage {
+		put(p.buffer, p.name)
+		if fi, err := p.buffer.Stat(); err == nil && fi.Size() > int64(p.limit) {
+			p.buffer.Truncate(int64(p.limit) + 1)
+			errType = ErrCollectSizeExceeded
+			return runner.StatusOutputLimitExceeded
+		}
+	} else {
+		defer p.buffer.Close()
+		buf, err := newStoreFile()
+		if err != nil {
+			errType = ErrCopyOutCreateFile
+			return fmt.Errorf("collect: failed to create store file for %q: %w", p.name, err)
+		}
+		// Ensure not copy over file size
+		_, err = buf.ReadFrom(io.LimitReader(p.buffer, int64(p.limit)+1))
+		if err != nil {
+			errType = ErrCopyOutCopyContent
+			buf.Close()
+			return fmt.Errorf("collect: failed to copy content for %q: %w", p.name, err)
+		}
+		put(buf, p.name)
+		if fi, err := p.buffer.Stat(); err == nil && fi.Size() > int64(p.limit) {
+			errType = ErrCollectSizeExceeded
+			return runner.StatusOutputLimitExceeded
+		}
+	}
+	return nil
+}
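Editor's note: both extracted helpers rely on the same size-capping trick, reading through io.LimitReader with limit+1 bytes so that a source exceeding the limit is detectable without copying unbounded data. A self-contained sketch of that pattern (names here are illustrative, not from the codebase):

package main

import (
	"fmt"
	"io"
	"strings"
)

// capRead copies at most limit bytes from r and reports whether r held more.
func capRead(r io.Reader, limit int64) ([]byte, bool, error) {
	// read limit+1 bytes: the one extra byte proves the source exceeded the cap
	b, err := io.ReadAll(io.LimitReader(r, limit+1))
	if err != nil {
		return nil, false, err
	}
	if int64(len(b)) > limit {
		return b[:limit], true, nil // truncate to the cap and flag the overflow
	}
	return b, false, nil
}

func main() {
	data, exceeded, err := capRead(strings.NewReader("hello, world"), 5)
	fmt.Printf("%q exceeded=%v err=%v\n", data, exceeded, err) // "hello" exceeded=true err=<nil>
}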