align interface for ptrace and namespace with deamon

criyle 2019-08-16 22:12:08 -07:00
parent c7679b02fb
commit c9f2bd0ae8
5 changed files with 193 additions and 107 deletions
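The diff below converges the ptrace, namespace, and deamon runners on one shape: Start takes a done channel for cancellation and returns a channel that delivers a single TraceResult, so the caller can select over the result, the real-time limit, and an interrupt signal. The following is a minimal, self-contained sketch of that caller-side pattern; runner, result, and sleepRunner are illustrative names, not repository code.

package main

import (
    "fmt"
    "os"
    "os/signal"
    "time"
)

// result stands in for specs.TraceResult.
type result struct{ status string }

// runner mirrors the reshaped Runner interface.
type runner interface {
    Start(done <-chan struct{}) (<-chan result, error)
}

// sleepRunner is a hypothetical runner that finishes after d unless cancelled.
type sleepRunner struct{ d time.Duration }

func (r sleepRunner) Start(done <-chan struct{}) (<-chan result, error) {
    out := make(chan result, 1) // buffered so the goroutine never blocks on send
    go func() {
        select {
        case <-time.After(r.d):
            out <- result{status: "normal"}
        case <-done:
            out <- result{status: "cancelled"}
        }
    }()
    return out, nil
}

func main() {
    var r runner = sleepRunner{d: 2 * time.Second}

    sig := make(chan os.Signal, 1)
    signal.Notify(sig, os.Interrupt)

    done := make(chan struct{})
    s, err := r.Start(done)
    if err != nil {
        fmt.Println("start failed:", err)
        return
    }

    var rt result
    select {
    case <-sig: // interrupted: cancel and wait for the runner to report
        close(done)
        rt = <-s
    case <-time.After(3 * time.Second): // real-time limit
        close(done)
        rt = <-s
    case rt = <-s: // finished on its own
    }
    fmt.Println("result:", rt.status)
}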

View File

@@ -5,6 +5,7 @@ import (
"fmt"
"io/ioutil"
"os"
"os/signal"
"time"
"github.com/criyle/go-judger/cgroup"
@@ -34,7 +35,7 @@ var (
// Runner can be a ptraced runner or a namespaced runner
type Runner interface {
Start() (specs.TraceResult, error)
Start(<-chan struct{}) (<-chan specs.TraceResult, error)
}
func printUsage() {
@@ -85,8 +86,33 @@ func main() {
if workPath == "" {
workPath, _ = os.Getwd()
}
rt, f, err := run()
var (
f *os.File
err error
)
if result == "stdout" {
f = os.Stdout
} else if result == "stderr" {
f = os.Stderr
} else {
f, err = os.Create(result)
if err != nil {
debug("Failed to open result file:", err)
return
}
defer f.Close()
}
rt, err := run()
if rt == nil {
rt = &specs.TraceResult{
TraceStatus: specs.TraceCodeFatal,
}
}
if err == nil && rt.TraceStatus != specs.TraceCodeNormal {
err = rt.TraceStatus
}
if err != nil {
debug(err)
c, ok := err.(specs.TraceCode)
@@ -103,14 +129,22 @@ func main() {
}
}
func run() (*specs.TraceResult, *os.File, error) {
type deamonRunner struct {
*deamon.Master
*deamon.ExecveParam
}
func (r *deamonRunner) Start(done <-chan struct{}) (<-chan specs.TraceResult, error) {
return r.Master.Execve(done, r.ExecveParam)
}
func run() (*specs.TraceResult, error) {
var (
runner Runner
cg *cgroup.CGroup
err error
execFile uintptr
rt specs.TraceResult
f *os.File
)
addRead := runconfig.GetExtraSet(addReadable, addRawReadable)
@@ -120,11 +154,11 @@ func run() (*specs.TraceResult, *os.File, error) {
if useCGroup {
cg, err = cgroup.NewCGroup("run_program")
if err != nil {
return nil, nil, err
return nil, err
}
defer cg.Destroy()
if err = cg.SetMemoryLimitInBytes(memoryLimit << 20); err != nil {
return nil, nil, err
return nil, err
}
}
@@ -140,11 +174,11 @@ func run() (*specs.TraceResult, *os.File, error) {
if memfile {
fin, err := os.Open(args[0])
if err != nil {
return nil, nil, fmt.Errorf("filed to open args[0]: %v", err)
return nil, fmt.Errorf("filed to open args[0]: %v", err)
}
execf, err := memfd.DupToMemfd("run_program", fin)
if err != nil {
return nil, nil, fmt.Errorf("dup to memfd failed: %v", err)
return nil, fmt.Errorf("dup to memfd failed: %v", err)
}
fin.Close()
defer execf.Close()
@@ -155,7 +189,7 @@ func run() (*specs.TraceResult, *os.File, error) {
// open input / output / err files
files, err := prepareFiles(inputFileName, outputFileName, errorFileName)
if err != nil {
return nil, nil, fmt.Errorf("failed to prepare files: %v", err)
return nil, fmt.Errorf("failed to prepare files: %v", err)
}
defer closeFiles(files)
@@ -177,52 +211,37 @@ func run() (*specs.TraceResult, *os.File, error) {
}
if useDeamon {
sTime := time.Now()
root, err := ioutil.TempDir("", "dm")
if err != nil {
return nil, nil, fmt.Errorf("cannot make temp root for deamon namespace: %v", err)
return nil, fmt.Errorf("cannot make temp root for deamon namespace: %v", err)
}
defer os.RemoveAll(root)
m, err := deamon.New(root)
if err != nil {
return nil, nil, fmt.Errorf("failed to new master: %v", err)
return nil, fmt.Errorf("failed to new master: %v", err)
}
defer m.Destroy()
err = m.Ping()
if err != nil {
return nil, nil, fmt.Errorf("failed to ping deamon: %v", err)
return nil, fmt.Errorf("failed to ping deamon: %v", err)
}
rTime := time.Now()
done := make(chan struct{})
s, err := m.Execve(done, &deamon.ExecveParam{
Args: args,
Envv: []string{pathEnv},
Fds: fds,
ExecFile: execFile,
RLimits: rlims.PrepareRLimit(),
SyncFunc: syncFunc,
})
if err != nil {
return nil, nil, fmt.Errorf("failed to execve: %v", err)
runner = &deamonRunner{
Master: m,
ExecveParam: &deamon.ExecveParam{
Args: args,
Envv: []string{pathEnv},
Fds: fds,
ExecFile: execFile,
RLimits: rlims.PrepareRLimit(),
SyncFunc: syncFunc,
},
}
tC := time.After(time.Duration(int64(realTimeLimit) * int64(time.Second)))
select {
case <-tC:
close(done)
rt = <-s
case rt = <-s:
}
eTime := time.Now()
rt.SetUpTime = int64(rTime.Sub(sTime))
rt.RunningTime = int64(eTime.Sub(rTime))
m.Destroy()
} else if namespace {
h.SyscallAllow = append(h.SyscallAllow, h.SyscallTrace...)
root, err := ioutil.TempDir("", "ns")
if err != nil {
return nil, nil, fmt.Errorf("cannot make temp root for new namespace")
return nil, fmt.Errorf("cannot make temp root for new namespace")
}
defer os.RemoveAll(root)
@@ -273,38 +292,54 @@ func run() (*specs.TraceResult, *os.File, error) {
}
}
if result == "stdout" {
f = os.Stdout
} else if result == "stderr" {
f = os.Stderr
} else {
f, err := os.Create(result)
if err != nil {
return nil, nil, fmt.Errorf("Failed to open result file: %v", err)
}
defer f.Close()
}
// gracefully shut down
sig := make(chan os.Signal, 1)
signal.Notify(sig, os.Interrupt)
// Run tracer
if runner != nil {
rt, err = runner.Start()
sTime := time.Now()
done := make(chan struct{})
s, err := runner.Start(done)
rTime := time.Now()
if err != nil {
return nil, fmt.Errorf("failed to execve: %v", err)
}
tC := time.After(time.Duration(int64(realTimeLimit) * int64(time.Second)))
select {
case <-sig:
close(done)
rt = <-s
rt.TraceStatus = specs.TraceCodeFatal
case <-tC:
close(done)
rt = <-s
case rt = <-s:
}
eTime := time.Now()
if rt.SetUpTime == 0 {
rt.SetUpTime = int64(rTime.Sub(sTime))
rt.RunningTime = int64(eTime.Sub(rTime))
}
debug("results:", rt, err)
if useCGroup {
cpu, err := cg.CpuacctUsage()
if err != nil {
return nil, nil, fmt.Errorf("cgroup cpu: %v", err)
return nil, fmt.Errorf("cgroup cpu: %v", err)
}
memory, err := cg.MemoryMaxUsageInBytes()
if err != nil {
return nil, nil, fmt.Errorf("cgroup memory: %v", err)
return nil, fmt.Errorf("cgroup memory: %v", err)
}
debug("cgroup: cpu: ", cpu, " memory: ", memory)
rt.UserTime = cpu / uint64(time.Millisecond)
rt.UserMem = memory >> 10
}
return &rt, f, nil
return &rt, nil
}
func debug(v ...interface{}) {

View File

@@ -77,7 +77,8 @@ var (
// default syscalls to trace
defaultSyscallTraces = []string{
// should be traced
// execute file
"execve",
"execveat",
// file open

View File

@@ -10,19 +10,19 @@ import (
)
// Start starts the tracing process
func (r *RunProgram) Start() (rt specs.TraceResult, err error) {
func (r *RunProgram) Start(done <-chan struct{}) (<-chan specs.TraceResult, error) {
// build seccomp filter
filter, err := buildFilter(r.ShowDetails, r.SyscallAllowed, r.SyscallTraced)
if err != nil {
println(err)
return
return nil, err
}
defer filter.Release()
bpf, err := seccomp.FilterToBPF(filter)
if err != nil {
println(err)
return
return nil, err
}
ch := &forkexec.Runner{
@@ -42,7 +42,7 @@ func (r *RunProgram) Start() (rt specs.TraceResult, err error) {
Unsafe: r.Unsafe,
Handler: r.Handler,
}
return tracer.Trace(th, ch, specs.ResLimit(r.TraceLimit))
return tracer.Trace(done, th, ch, specs.ResLimit(r.TraceLimit))
}
// buildFilter builds the libseccomp filter according to the allowed and traced syscalls and the show-details flag

View File

@@ -3,7 +3,6 @@ package rununshared
import (
"fmt"
"os"
"syscall"
"time"
"github.com/criyle/go-judger/forkexec"
@@ -19,18 +18,18 @@ const (
)
// Start starts the unshared process
func (r *RunUnshared) Start() (rt specs.TraceResult, err error) {
func (r *RunUnshared) Start(done <-chan struct{}) (<-chan specs.TraceResult, error) {
filter, err := seccomp.BuildFilter(libseccomp.ActKill, libseccomp.ActTrap, r.SyscallAllowed, []string{})
if err != nil {
println(err)
return
return nil, err
}
defer filter.Release()
bpf, err := seccomp.FilterToBPF(filter)
if err != nil {
println(err)
return
return nil, err
}
ch := &forkexec.Runner{
@@ -51,18 +50,36 @@ func (r *RunUnshared) Start() (rt specs.TraceResult, err error) {
DropCaps: true,
SyncFunc: r.SyncFunc,
}
return r.Trace(ch)
result := make(chan specs.TraceResult, 1)
start := make(chan struct{})
finish := make(chan struct{})
// run
go func() {
defer close(finish)
ret, err2 := r.Trace(done, start, ch)
err = err2
result <- ret
}()
select {
case <-start:
case <-finish:
}
return result, err
}
// Trace tracks child processes
func (r *RunUnshared) Trace(runner *forkexec.Runner) (result specs.TraceResult, err error) {
func (r *RunUnshared) Trace(done <-chan struct{}, start chan<- struct{},
runner *forkexec.Runner) (result specs.TraceResult, err error) {
var (
wstatus unix.WaitStatus // wait4 wait status
rusage unix.Rusage // wait4 rusage
tle = false
status = specs.TraceCodeNormal
sTime = time.Now().UnixNano() // start time
fTime int64 // finish time for setup
sTime = time.Now() // start time
fTime time.Time // finish time for setup
)
// Start the runner
@@ -72,29 +89,35 @@ func (r *RunUnshared) Trace(runner *forkexec.Runner) (result specs.TraceResult,
result.TraceStatus = specs.TraceCodeRE
return result, err
}
// Set real time limit, kill process after it
timer := time.AfterFunc(time.Duration(int64(r.ResLimits.RealTimeLimit)*1e6), func() {
tle = true
killAll(pgid)
})
close(start)
finish := make(chan struct{})
defer close(finish)
// handle cancellation
go func() {
select {
case <-done:
tle = true
killAll(pgid)
case <-finish:
}
}()
defer func() {
timer.Stop()
if tle {
err = specs.TraceCodeTLE
}
// kill all tracee upon return
killAll(pgid)
collectZombie(pgid)
result.TraceStat.SetUpTime = fTime - sTime
result.RunningTime = time.Now().UnixNano() - fTime
result.SetUpTime = fTime.Sub(sTime).Nanoseconds()
result.RunningTime = time.Since(fTime).Nanoseconds()
}()
// currently, we do not have any way to track mount syscall time usage
fTime = time.Now().UnixNano()
fTime = time.Now()
for {
pid, err := unix.Wait4(pgid, &wstatus, unix.WALL, &rusage)
_, err := unix.Wait4(pgid, &wstatus, 0, &rusage)
r.println("wait4: ", wstatus)
if err != nil {
return result, specs.TraceCodeFatal
@@ -127,7 +150,7 @@ func (r *RunUnshared) Trace(runner *forkexec.Runner) (result specs.TraceResult,
case wstatus.Signaled():
sig := wstatus.Signal()
switch sig {
case unix.SIGXCPU:
case unix.SIGXCPU, unix.SIGKILL:
status = specs.TraceCodeTLE
case unix.SIGXFSZ:
status = specs.TraceCodeOLE
@@ -138,8 +161,6 @@ func (r *RunUnshared) Trace(runner *forkexec.Runner) (result specs.TraceResult,
}
result.TraceStatus = status
return result, status
case wstatus.Stopped():
unix.Kill(pid, syscall.SIGCONT)
}
}
return result, status
@@ -152,10 +173,10 @@ killAll(pgid int) {
// collect dead child processes
func collectZombie(pgid int) {
var wstatus unix.WaitStatus
// collect zombies
for {
var wstatus unix.WaitStatus
if _, err := unix.Wait4(-pgid, &wstatus, unix.WALL|unix.WNOWAIT, nil); err != nil {
if _, err := unix.Wait4(-pgid, &wstatus, unix.WALL|unix.WNOHANG, nil); err != nil {
break
}
}
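For reference, a minimal sketch of the done/finish watcher that replaces the time.AfterFunc real-time timer in the diff above. killAllStub and waitLoop are stand-ins, not repository code, and the atomic flag is a sketch-level substitution (the repository code uses a plain bool that is read after the wait loop).

package main

import (
    "fmt"
    "sync/atomic"
    "time"
)

// killAllStub stands in for killAll(pgid); it only logs here.
func killAllStub(pgid int) { fmt.Println("would kill process group", pgid) }

// waitLoop stands in for the wait4 loop; here it just sleeps.
func waitLoop() { time.Sleep(100 * time.Millisecond) }

func trace(done <-chan struct{}, pgid int) bool {
    var cancelled atomic.Bool
    finish := make(chan struct{})
    defer close(finish) // unblocks the watcher once the wait loop returns

    // The watcher kills the process group when the caller closes done,
    // or exits quietly when the trace loop finishes first and closes finish.
    go func() {
        select {
        case <-done:
            cancelled.Store(true)
            killAllStub(pgid)
        case <-finish:
        }
    }()

    waitLoop()
    return cancelled.Load() // mapped to TraceCodeTLE in the real code
}

func main() {
    done := make(chan struct{})
    go func() {
        time.Sleep(10 * time.Millisecond) // an upstream timeout or signal would close done
        close(done)
    }()
    fmt.Println("tle:", trace(done, 1234))
}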

View File

@@ -15,19 +15,41 @@ const (
MsgHandle
)
// Trace traces all child processes created by the runner
// this function should be called only once and in the same thread that
// execs the tracee
func Trace(handler Handler, runner Runner, limits specs.ResLimit) (result specs.TraceResult, err error) {
// Trace starts a new goroutine and traces the runner with ptrace
func Trace(done <-chan struct{}, handler Handler, runner Runner, limits specs.ResLimit) (<-chan specs.TraceResult, error) {
var err error
result := make(chan specs.TraceResult, 1)
start := make(chan struct{})
finish := make(chan struct{})
// run
go func() {
defer close(finish)
ret, err2 := TraceRun(done, start, handler, runner, limits)
err = err2
result <- ret
}()
select {
case <-start:
case <-finish:
}
return result, err
}
// TraceRun starts and traces all child processes of the runner in the calling goroutine
// parameter done is used to cancel the work; start is used to notify that the child has started
func TraceRun(done <-chan struct{}, start chan<- struct{},
handler Handler, runner Runner, limits specs.ResLimit) (result specs.TraceResult, err error) {
var (
wstatus unix.WaitStatus // wait4 wait status
rusage unix.Rusage // wait4 rusage
tle bool // whether the timer triggered due to timeout
traced = make(map[int]bool) // store all processes that have set ptrace options
execved = false // store whether the runner process has successfully execved
pid int // store pid of wait4 result
sTime = time.Now().UnixNano() // records start time for trace process
fTime int64 // records finish time for execve
wstatus unix.WaitStatus // wait4 wait status
rusage unix.Rusage // wait4 rusage
tle bool // whether the timer triggered due to timeout
traced = make(map[int]bool) // store all processes that have set ptrace options
execved = false // store whether the runner process has successfully execved
pid int // store pid of wait4 result
sTime = time.Now() // records start time for trace process
fTime time.Time // records finish time for execve
)
// ptrace is thread based (kernel proc)
@@ -43,16 +65,23 @@ func Trace(handler Handler, runner Runner, limits specs.ResLimit) (result specs.
return result, err
}
// Set real time limit, kill process after it
timer := time.AfterFunc(time.Duration(int64(limits.RealTimeLimit)*1e6), func() {
tle = true
killAll(pgid)
})
close(start)
finish := make(chan struct{})
defer close(finish)
// handle cancellation
go func() {
select {
case <-done:
tle = true
killAll(pgid)
case <-finish:
}
}()
// handle potential panic and tle
// also ensure processes are terminated properly
defer func() {
timer.Stop()
if tle {
err = specs.TraceCodeTLE
}
@@ -63,8 +92,8 @@ func Trace(handler Handler, runner Runner, limits specs.ResLimit) (result specs.
// kill all tracee upon return
killAll(pgid)
collectZombie(pgid)
result.TraceStat.SetUpTime = fTime - sTime
result.RunningTime = time.Now().UnixNano() - fTime
result.SetUpTime = fTime.Sub(sTime).Nanoseconds()
result.RunningTime = time.Since(fTime).Nanoseconds()
}()
// trace unixs
@@ -125,7 +154,7 @@ func Trace(handler Handler, runner Runner, limits specs.ResLimit) (result specs.
if pid == pgid {
delete(traced, pid)
switch sig {
case unix.SIGXCPU:
case unix.SIGXCPU, unix.SIGKILL:
status = specs.TraceCodeTLE
case unix.SIGXFSZ:
status = specs.TraceCodeOLE
@@ -176,7 +205,7 @@ func Trace(handler Handler, runner Runner, limits specs.ResLimit) (result specs.
case unix.PTRACE_EVENT_EXEC:
// forked tracee has successfully called execve
if !execved {
fTime = time.Now().UnixNano()
fTime = time.Now()
execved = true
}
handler.Debug("ptrace stop exec")
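For reference, a minimal sketch of the Trace/TraceRun split above: the blocking trace loop runs in its own goroutine, reports through a buffered result channel, and the wrapper returns once either start or finish is closed so that setup errors surface synchronously. Names are illustrative, and unlike the code above the sketch reads err only after finish closes, to keep the example free of data races.

package main

import (
    "errors"
    "fmt"
    "time"
)

type traceResult struct{ code int }

// blockingTrace stands in for TraceRun: it closes start once the child is
// launched, then blocks until the work completes or done is closed.
func blockingTrace(done <-chan struct{}, start chan<- struct{}) (traceResult, error) {
    close(start) // pretend setup succeeded and the child process started
    select {
    case <-time.After(50 * time.Millisecond):
        return traceResult{code: 0}, nil
    case <-done:
        return traceResult{code: -1}, errors.New("cancelled")
    }
}

// asyncTrace stands in for Trace: it wraps the blocking call in a goroutine
// and hands the caller a channel that will carry the single result.
func asyncTrace(done <-chan struct{}) (<-chan traceResult, error) {
    var err error
    result := make(chan traceResult, 1) // buffered: the goroutine never blocks on send
    start := make(chan struct{})
    finish := make(chan struct{})
    go func() {
        defer close(finish)
        ret, traceErr := blockingTrace(done, start)
        err = traceErr
        result <- ret
    }()
    // Wait until the child has started, or until the goroutine already
    // finished (e.g. setup failed before start was closed), so that setup
    // errors are reported synchronously.
    select {
    case <-start:
        return result, nil
    case <-finish:
        return result, err
    }
}

func main() {
    done := make(chan struct{})
    s, err := asyncTrace(done)
    fmt.Println("started, err =", err)
    fmt.Println("result code:", (<-s).code)
}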