align the ptrace and namespace runner interfaces with the deamon runner

criyle 2019-08-16 22:12:08 -07:00
parent c7679b02fb
commit c9f2bd0ae8
5 changed files with 193 additions and 107 deletions


@@ -5,6 +5,7 @@ import (
     "fmt"
     "io/ioutil"
     "os"
+    "os/signal"
     "time"

     "github.com/criyle/go-judger/cgroup"
@@ -34,7 +35,7 @@ var (
 // Runner can be ptraced runner or namespaced runner
 type Runner interface {
-    Start() (specs.TraceResult, error)
+    Start(<-chan struct{}) (<-chan specs.TraceResult, error)
 }

 func printUsage() {
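For context, a minimal caller-side sketch (not part of this commit) of the aligned interface: Start launches the tracee asynchronously, returns a channel that will carry the final result, and is cancelled by closing the done channel. A placeholder Result type stands in for specs.TraceResult so the snippet is self-contained, and the timeout handling mirrors the select that run() gains below.

package example

import (
	"fmt"
	"time"
)

// Result is a self-contained stand-in for specs.TraceResult.
type Result struct{ Status string }

// Runner mirrors the interface this commit aligns across the ptrace,
// namespace and deamon implementations.
type Runner interface {
	Start(done <-chan struct{}) (<-chan Result, error)
}

// runWithTimeout is an illustrative helper showing the intended call pattern.
func runWithTimeout(r Runner, timeout time.Duration) (Result, error) {
	done := make(chan struct{})
	s, err := r.Start(done) // returns once the tracee is launched (or has failed)
	if err != nil {
		return Result{}, fmt.Errorf("failed to start: %v", err)
	}
	select {
	case <-time.After(timeout):
		close(done) // cancel: the runner kills the tracee and sends its final result
		return <-s, nil
	case rt := <-s:
		return rt, nil
	}
}

Closing done is the single cancellation path; in run() below both the real-time limit and the SIGINT handler reuse it.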
@@ -85,8 +86,33 @@ func main() {
     if workPath == "" {
         workPath, _ = os.Getwd()
     }
-    rt, f, err := run()
+
+    var (
+        f   *os.File
+        err error
+    )
+    if result == "stdout" {
+        f = os.Stdout
+    } else if result == "stderr" {
+        f = os.Stderr
+    } else {
+        f, err = os.Create(result)
+        if err != nil {
+            debug("Failed to open result file:", err)
+            return
+        }
+        defer f.Close()
+    }
+
+    rt, err := run()
+    if rt == nil {
+        rt = &specs.TraceResult{
+            TraceStatus: specs.TraceCodeFatal,
+        }
+    }
+    if err == nil && rt.TraceStatus != specs.TraceCodeNormal {
+        err = rt.TraceStatus
+    }
     if err != nil {
         debug(err)
         c, ok := err.(specs.TraceCode)
@@ -103,14 +129,22 @@ func main() {
     }
 }

-func run() (*specs.TraceResult, *os.File, error) {
+type deamonRunner struct {
+    *deamon.Master
+    *deamon.ExecveParam
+}
+
+func (r *deamonRunner) Start(done <-chan struct{}) (<-chan specs.TraceResult, error) {
+    return r.Master.Execve(done, r.ExecveParam)
+}
+
+func run() (*specs.TraceResult, error) {
     var (
         runner   Runner
         cg       *cgroup.CGroup
         err      error
         execFile uintptr
         rt       specs.TraceResult
-        f        *os.File
     )

     addRead := runconfig.GetExtraSet(addReadable, addRawReadable)
@@ -120,11 +154,11 @@ func run() (*specs.TraceResult, *os.File, error) {
     if useCGroup {
         cg, err = cgroup.NewCGroup("run_program")
         if err != nil {
-            return nil, nil, err
+            return nil, err
         }
         defer cg.Destroy()
         if err = cg.SetMemoryLimitInBytes(memoryLimit << 20); err != nil {
-            return nil, nil, err
+            return nil, err
         }
     }
@@ -140,11 +174,11 @@ func run() (*specs.TraceResult, *os.File, error) {
     if memfile {
         fin, err := os.Open(args[0])
         if err != nil {
-            return nil, nil, fmt.Errorf("filed to open args[0]: %v", err)
+            return nil, fmt.Errorf("filed to open args[0]: %v", err)
         }
         execf, err := memfd.DupToMemfd("run_program", fin)
         if err != nil {
-            return nil, nil, fmt.Errorf("dup to memfd failed: %v", err)
+            return nil, fmt.Errorf("dup to memfd failed: %v", err)
         }
         fin.Close()
         defer execf.Close()
@@ -155,7 +189,7 @@ func run() (*specs.TraceResult, *os.File, error) {
     // open input / output / err files
     files, err := prepareFiles(inputFileName, outputFileName, errorFileName)
     if err != nil {
-        return nil, nil, fmt.Errorf("failed to prepare files: %v", err)
+        return nil, fmt.Errorf("failed to prepare files: %v", err)
     }
     defer closeFiles(files)
@@ -177,52 +211,37 @@ func run() (*specs.TraceResult, *os.File, error) {
     }

     if useDeamon {
-        sTime := time.Now()
         root, err := ioutil.TempDir("", "dm")
         if err != nil {
-            return nil, nil, fmt.Errorf("cannot make temp root for deamon namespace: %v", err)
+            return nil, fmt.Errorf("cannot make temp root for deamon namespace: %v", err)
         }
         defer os.RemoveAll(root)
         m, err := deamon.New(root)
         if err != nil {
-            return nil, nil, fmt.Errorf("failed to new master: %v", err)
+            return nil, fmt.Errorf("failed to new master: %v", err)
         }
+        defer m.Destroy()
         err = m.Ping()
         if err != nil {
-            return nil, nil, fmt.Errorf("failed to ping deamon: %v", err)
+            return nil, fmt.Errorf("failed to ping deamon: %v", err)
         }
-        rTime := time.Now()
-        done := make(chan struct{})
-        s, err := m.Execve(done, &deamon.ExecveParam{
+        runner = &deamonRunner{
+            Master: m,
+            ExecveParam: &deamon.ExecveParam{
                 Args:     args,
                 Envv:     []string{pathEnv},
                 Fds:      fds,
                 ExecFile: execFile,
                 RLimits:  rlims.PrepareRLimit(),
                 SyncFunc: syncFunc,
-        })
-        if err != nil {
-            return nil, nil, fmt.Errorf("failed to execve: %v", err)
-        }
-        tC := time.After(time.Duration(int64(realTimeLimit) * int64(time.Second)))
-        select {
-        case <-tC:
-            close(done)
-            rt = <-s
-        case rt = <-s:
-        }
-        eTime := time.Now()
-        rt.SetUpTime = int64(rTime.Sub(sTime))
-        rt.RunningTime = int64(eTime.Sub(rTime))
-        m.Destroy()
+            },
+        }
     } else if namespace {
         h.SyscallAllow = append(h.SyscallAllow, h.SyscallTrace...)
         root, err := ioutil.TempDir("", "ns")
         if err != nil {
-            return nil, nil, fmt.Errorf("cannot make temp root for new namespace")
+            return nil, fmt.Errorf("cannot make temp root for new namespace")
         }
         defer os.RemoveAll(root)
@@ -273,38 +292,54 @@ func run() (*specs.TraceResult, *os.File, error) {
         }
     }

-    if result == "stdout" {
-        f = os.Stdout
-    } else if result == "stderr" {
-        f = os.Stderr
-    } else {
-        f, err := os.Create(result)
-        if err != nil {
-            return nil, nil, fmt.Errorf("Failed to open result file: %v", err)
-        }
-        defer f.Close()
-    }
+    // gracefully shutdown
+    sig := make(chan os.Signal, 1)
+    signal.Notify(sig, os.Interrupt)

     // Run tracer
-    if runner != nil {
-        rt, err = runner.Start()
-    }
+    sTime := time.Now()
+    done := make(chan struct{})
+    s, err := runner.Start(done)
+    rTime := time.Now()
+    if err != nil {
+        return nil, fmt.Errorf("failed to execve: %v", err)
+    }
+    tC := time.After(time.Duration(int64(realTimeLimit) * int64(time.Second)))
+    select {
+    case <-sig:
+        close(done)
+        rt = <-s
+        rt.TraceStatus = specs.TraceCodeFatal
+    case <-tC:
+        close(done)
+        rt = <-s
+    case rt = <-s:
+    }
+    eTime := time.Now()
+    if rt.SetUpTime == 0 {
+        rt.SetUpTime = int64(rTime.Sub(sTime))
+        rt.RunningTime = int64(eTime.Sub(rTime))
+    }
     debug("results:", rt, err)

     if useCGroup {
         cpu, err := cg.CpuacctUsage()
         if err != nil {
-            return nil, nil, fmt.Errorf("cgroup cpu: %v", err)
+            return nil, fmt.Errorf("cgroup cpu: %v", err)
         }
         memory, err := cg.MemoryMaxUsageInBytes()
         if err != nil {
-            return nil, nil, fmt.Errorf("cgroup memory: %v", err)
+            return nil, fmt.Errorf("cgroup memory: %v", err)
         }
         debug("cgroup: cpu: ", cpu, " memory: ", memory)
         rt.UserTime = cpu / uint64(time.Millisecond)
         rt.UserMem = memory >> 10
     }
-    return &rt, f, nil
+    return &rt, nil
 }

 func debug(v ...interface{}) {


@@ -77,7 +77,8 @@ var (
     // default syscalls to trace
     defaultSyscallTraces = []string{
-        // should be traced
+        // execute file
+        "execve",
         "execveat",

         // file open


@@ -10,19 +10,19 @@ import (
 )

 // Start starts the tracing process
-func (r *RunProgram) Start() (rt specs.TraceResult, err error) {
+func (r *RunProgram) Start(done <-chan struct{}) (<-chan specs.TraceResult, error) {
     // build seccomp filter
     filter, err := buildFilter(r.ShowDetails, r.SyscallAllowed, r.SyscallTraced)
     if err != nil {
         println(err)
-        return
+        return nil, err
     }
     defer filter.Release()

     bpf, err := seccomp.FilterToBPF(filter)
     if err != nil {
         println(err)
-        return
+        return nil, err
     }

     ch := &forkexec.Runner{
@@ -42,7 +42,7 @@ func (r *RunProgram) Start() (rt specs.TraceResult, err error) {
         Unsafe:  r.Unsafe,
         Handler: r.Handler,
     }
-    return tracer.Trace(th, ch, specs.ResLimit(r.TraceLimit))
+    return tracer.Trace(done, th, ch, specs.ResLimit(r.TraceLimit))
 }

 // build filter builds the libseccomp filter according to the allow, trace and show details


@@ -3,7 +3,6 @@ package rununshared
 import (
     "fmt"
     "os"
-    "syscall"
     "time"

     "github.com/criyle/go-judger/forkexec"
@@ -19,18 +18,18 @@
 )

 // Start starts the unshared process
-func (r *RunUnshared) Start() (rt specs.TraceResult, err error) {
+func (r *RunUnshared) Start(done <-chan struct{}) (<-chan specs.TraceResult, error) {
     filter, err := seccomp.BuildFilter(libseccomp.ActKill, libseccomp.ActTrap, r.SyscallAllowed, []string{})
     if err != nil {
         println(err)
-        return
+        return nil, err
     }
     defer filter.Release()

     bpf, err := seccomp.FilterToBPF(filter)
     if err != nil {
         println(err)
-        return
+        return nil, err
     }

     ch := &forkexec.Runner{
@@ -51,18 +50,36 @@ func (r *RunUnshared) Start() (rt specs.TraceResult, err error) {
         DropCaps: true,
         SyncFunc: r.SyncFunc,
     }
-    return r.Trace(ch)
+
+    result := make(chan specs.TraceResult, 1)
+    start := make(chan struct{})
+    finish := make(chan struct{})
+    // run
+    go func() {
+        defer close(finish)
+        ret, err2 := r.Trace(done, start, ch)
+        err = err2
+        result <- ret
+    }()
+    select {
+    case <-start:
+    case <-finish:
+    }
+    return result, err
 }

 // Trace tracks child processes
-func (r *RunUnshared) Trace(runner *forkexec.Runner) (result specs.TraceResult, err error) {
+func (r *RunUnshared) Trace(done <-chan struct{}, start chan<- struct{},
+    runner *forkexec.Runner) (result specs.TraceResult, err error) {
     var (
         wstatus unix.WaitStatus // wait4 wait status
         rusage  unix.Rusage     // wait4 rusage
         tle     = false
         status  = specs.TraceCodeNormal
-        sTime   = time.Now().UnixNano() // start time
-        fTime   int64                   // finish time for setup
+        sTime   = time.Now() // start time
+        fTime   time.Time    // finish time for setup
     )

     // Start the runner
@@ -72,29 +89,35 @@ func (r *RunUnshared) Trace(runner *forkexec.Runner) (result specs.TraceResult,
         result.TraceStatus = specs.TraceCodeRE
         return result, err
     }
-    // Set real time limit, kill process after it
-    timer := time.AfterFunc(time.Duration(int64(r.ResLimits.RealTimeLimit)*1e6), func() {
-        tle = true
-        killAll(pgid)
-    })
+    close(start)
+    finish := make(chan struct{})
+    defer close(finish)
+    // handle cancel
+    go func() {
+        select {
+        case <-done:
+            tle = true
+            killAll(pgid)
+        case <-finish:
+        }
+    }()

     defer func() {
-        timer.Stop()
         if tle {
             err = specs.TraceCodeTLE
         }
         // kill all tracee upon return
         killAll(pgid)
         collectZombie(pgid)
-        result.TraceStat.SetUpTime = fTime - sTime
-        result.RunningTime = time.Now().UnixNano() - fTime
+        result.SetUpTime = fTime.Sub(sTime).Nanoseconds()
+        result.RunningTime = time.Since(fTime).Nanoseconds()
     }()

-    // currently, we do not have any way to track mount syscall time usage
-    fTime = time.Now().UnixNano()
+    fTime = time.Now()
     for {
-        pid, err := unix.Wait4(pgid, &wstatus, unix.WALL, &rusage)
+        _, err := unix.Wait4(pgid, &wstatus, 0, &rusage)
         r.println("wait4: ", wstatus)
         if err != nil {
             return result, specs.TraceCodeFatal
@@ -127,7 +150,7 @@ func (r *RunUnshared) Trace(runner *forkexec.Runner) (result specs.TraceResult,
     case wstatus.Signaled():
         sig := wstatus.Signal()
         switch sig {
-        case unix.SIGXCPU:
+        case unix.SIGXCPU, unix.SIGKILL:
             status = specs.TraceCodeTLE
         case unix.SIGXFSZ:
             status = specs.TraceCodeOLE
@@ -138,8 +161,6 @@ func (r *RunUnshared) Trace(runner *forkexec.Runner) (result specs.TraceResult,
         }
         result.TraceStatus = status
         return result, status
-    case wstatus.Stopped():
-        unix.Kill(pid, syscall.SIGCONT)
     }
 }
 return result, status
@@ -152,10 +173,10 @@ func killAll(pgid int) {
 // collect died child processes
 func collectZombie(pgid int) {
+    var wstatus unix.WaitStatus
     // collect zombies
     for {
-        var wstatus unix.WaitStatus
-        if _, err := unix.Wait4(-pgid, &wstatus, unix.WALL|unix.WNOWAIT, nil); err != nil {
+        if _, err := unix.Wait4(-pgid, &wstatus, unix.WALL|unix.WNOHANG, nil); err != nil {
             break
         }
     }
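A note on the flag change above: WNOWAIT leaves the child in a waitable state, so the old loop could observe the same zombie repeatedly without ever reaping it, while WNOHANG reaps exited children and returns immediately when none are ready. Below is a standalone sketch of the reaping pattern, assuming golang.org/x/sys/unix; the extra pid check is an addition for the sketch, not part of this commit.

package example

import "golang.org/x/sys/unix"

// reapGroup collects exited children of process group pgid without blocking.
func reapGroup(pgid int) {
	var wstatus unix.WaitStatus
	for {
		// -pgid waits for any child in the process group pgid.
		pid, err := unix.Wait4(-pgid, &wstatus, unix.WALL|unix.WNOHANG, nil)
		if err != nil || pid <= 0 {
			// err is typically unix.ECHILD once every child has been reaped;
			// pid == 0 means children exist but none has exited yet.
			break
		}
	}
}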


@@ -15,10 +15,32 @@ const (
     MsgHandle
 )

-// Trace traces all child process that created by runner
-// this function should called only once and in the same thread that
-// exec tracee
-func Trace(handler Handler, runner Runner, limits specs.ResLimit) (result specs.TraceResult, err error) {
+// Trace starts new goroutine and trace runner with ptrace
+func Trace(done <-chan struct{}, handler Handler, runner Runner, limits specs.ResLimit) (<-chan specs.TraceResult, error) {
+    var err error
+    result := make(chan specs.TraceResult, 1)
+    start := make(chan struct{})
+    finish := make(chan struct{})
+    // run
+    go func() {
+        defer close(finish)
+        ret, err2 := TraceRun(done, start, handler, runner, limits)
+        err = err2
+        result <- ret
+    }()
+    select {
+    case <-start:
+    case <-finish:
+    }
+    return result, err
+}
+
+// TraceRun start and traces all child process by runner in the calling goroutine
+// parameter done used to cancel work, start is used notify child starts
+func TraceRun(done <-chan struct{}, start chan<- struct{},
+    handler Handler, runner Runner, limits specs.ResLimit) (result specs.TraceResult, err error) {
     var (
         wstatus unix.WaitStatus // wait4 wait status
         rusage  unix.Rusage     // wait4 rusage
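The wrapper above (and the identical one added to RunUnshared.Start) relies on a small start/finish handshake: the worker goroutine closes start once the tracee has been launched and closes finish when it returns, so Trace only hands control back to the caller after the child is running or the launch has already failed. Below is a self-contained sketch of that pattern, with a placeholder work function standing in for TraceRun; names and values are illustrative.

package example

import (
	"fmt"
	"time"
)

// startAsync mirrors the Trace wrapper: it launches work in a goroutine but
// returns only after work has either signalled a successful start
// (close(start)) or finished early (close(finish), typically a launch failure).
func startAsync(work func(start chan<- struct{}) (int, error)) (<-chan int, error) {
	var err error
	result := make(chan int, 1) // buffered: the worker never blocks on send
	start := make(chan struct{})
	finish := make(chan struct{})
	go func() {
		defer close(finish)
		ret, err2 := work(start)
		err = err2 // written before finish is closed; read by the caller on that path
		result <- ret
	}()
	select {
	case <-start: // running: the final result arrives on the channel later
	case <-finish: // returned before start: err carries the launch failure
	}
	return result, err
}

func demo() {
	s, err := startAsync(func(start chan<- struct{}) (int, error) {
		close(start) // tracee launched successfully
		time.Sleep(10 * time.Millisecond)
		return 42, nil
	})
	if err != nil {
		fmt.Println("failed to start:", err)
		return
	}
	fmt.Println(<-s) // 42
}

The result channel is buffered with capacity 1, matching the commit, so the worker can always deliver the final result without blocking.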
@@ -26,8 +48,8 @@ func Trace(handler Handler, runner Runner, limits specs.ResLimit) (result specs.
     traced  = make(map[int]bool) // store all process that have set ptrace options
     execved = false              // store whether the runner process have successfully execvd
     pid     int                  // store pid of wait4 result
-    sTime   = time.Now().UnixNano() // records start time for trace process
-    fTime   int64                   // records finish time for execve
+    sTime   = time.Now() // records start time for trace process
+    fTime   time.Time    // records finish time for execve
 )

 // ptrace is thread based (kernel proc)
@@ -43,16 +65,23 @@ func Trace(handler Handler, runner Runner, limits specs.ResLimit) (result specs.
     return result, err
 }

-    // Set real time limit, kill process after it
-    timer := time.AfterFunc(time.Duration(int64(limits.RealTimeLimit)*1e6), func() {
-        tle = true
-        killAll(pgid)
-    })
+    close(start)
+    finish := make(chan struct{})
+    defer close(finish)
+    // handle cancelation
+    go func() {
+        select {
+        case <-done:
+            tle = true
+            killAll(pgid)
+        case <-finish:
+        }
+    }()

     // handler potential panic and tle
     // also ensure processes was well terminated
     defer func() {
-        timer.Stop()
         if tle {
             err = specs.TraceCodeTLE
         }
@@ -63,8 +92,8 @@ func Trace(handler Handler, runner Runner, limits specs.ResLimit) (result specs.
     // kill all tracee upon return
     killAll(pgid)
     collectZombie(pgid)
-    result.TraceStat.SetUpTime = fTime - sTime
-    result.RunningTime = time.Now().UnixNano() - fTime
+    result.SetUpTime = fTime.Sub(sTime).Nanoseconds()
+    result.RunningTime = time.Since(fTime).Nanoseconds()
 }()

 // trace unixs
@@ -125,7 +154,7 @@ func Trace(handler Handler, runner Runner, limits specs.ResLimit) (result specs.
     if pid == pgid {
         delete(traced, pid)
         switch sig {
-        case unix.SIGXCPU:
+        case unix.SIGXCPU, unix.SIGKILL:
             status = specs.TraceCodeTLE
         case unix.SIGXFSZ:
             status = specs.TraceCodeOLE
@@ -176,7 +205,7 @@ func Trace(handler Handler, runner Runner, limits specs.ResLimit) (result specs.
     case unix.PTRACE_EVENT_EXEC:
         // forked tracee have successfully called execve
         if !execved {
-            fTime = time.Now().UnixNano()
+            fTime = time.Now()
             execved = true
         }
         handler.Debug("ptrace stop exec")