Refactor config parsing

criyle 2020-11-29 09:00:12 -08:00
parent fb9dff26b6
commit a807a66176
25 changed files with 354 additions and 229 deletions

View File

@ -38,7 +38,7 @@ Install GO 1.13+ from [download](https://golang.org/dl/)
```bash
go get github.com/criyle/go-judge/cmd/executorserver
sudo ~/go/bin/executorserver # or executorserver if $(GOPATH)/bin is in your $PATH
~/go/bin/executorserver # or executorserver if $(GOPATH)/bin is in your $PATH
```
Or, with Docker:
@ -49,29 +49,30 @@ docker run -it --rm --privileged -p 5050:5050 criyle/executorserver:demo
Or build your own image: `docker build -t executorserver -f Dockerfile.exec .`
The `executorserver` needs root privilege to create `cgroup`. Either create the sub-directories `/sys/fs/cgroup/cpuacct/go-judger`, `/sys/fs/cgroup/memory/go-judger`, `/sys/fs/cgroup/pids/go-judger` and make them readable by the execution user, or use `sudo` to run it.
The `executorserver` needs root privilege to create `cgroup`. Either create the sub-directories `/sys/fs/cgroup/cpuacct/executor_server`, `/sys/fs/cgroup/memory/executor_server`, `/sys/fs/cgroup/pids/executor_server` and make them readable by the execution user, or use `sudo` to run it.
#### Command Line Arguments
- The default binding address for the executor server is `:5050`. It can be specified with the `-http` flag.
- The default binding address for the gRPC executor server is `:5051`. It can be specified with the `-grpc` flag. (Notice: the `GRPC=1` environment variable needs to be set to enable the gRPC endpoint.)
- The default binding address for the executor server is `:5050`. It can be specified with the `-http-addr` flag.
- The default binding address for the gRPC executor server is `:5051`. It can be specified with the `-grpc-addr` flag. (Notice: the `ES_ENABLE_GRPC=1` environment variable needs to be set to enable the gRPC endpoint.)
- The default concurrency is `4`. It can be specified with the `-parallelism` flag.
- The default file store is in memory; a local cache directory can be specified with the `-dir` flag.
- The default log level is debug; use `-silent` to disable logs.
- `-token` adds token-based authentication to REST / gRPC.
- `-srcprefix` restricts the `src` copyIn path (must be an absolute path).
- `-src-prefix` restricts the `src` copyIn path (must be an absolute path).
- `-time-limit-checker-interval` specifies the time limit checker interval (default 100ms).
#### Environment Variables
Environment variables override command line arguments if both are present (the mapping behind the new `ES_*` names is sketched after this list).
- The HTTP binding address is specified as `HTTP_ADDR=addr`
- The gRPC binding address is specified as `GRPC_ADDR=addr`
- The parallelism is specified as `PARALLELISM=4`
- The token is specified as `TOKEN=token`
- `GRPC=1` enables gRPC
- `METRICS=1` enables metrics
- `DEBUG=1` enables debug
- The HTTP binding address is specified as `ES_HTTP_ADDR=addr`
- The gRPC binding address is specified as `ES_GRPC_ADDR=addr`
- The parallelism is specified as `ES_PARALLELISM=4`
- The token is specified as `ES_AUTH_TOKEN=token`
- `ES_ENABLE_GRPC=1` enables gRPC
- `ES_ENABLE_METRICS=1` enables metrics
- `ES_ENABLE_DEBUG=1` enables debug
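
The `ES_*` names above are not hand-maintained: multiconfig derives them from the `Config` field names introduced later in this commit by splitting camel case. A minimal sketch of that mapping, assuming multiconfig's splitting behavior (the `envName` helper is illustrative, not part of the repository):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/fatih/camelcase" // pulled into go.mod by this commit
)

// envName mirrors how multiconfig's EnvironmentLoader builds variable names:
// split the field name on camel-case boundaries, upper-case the parts, join
// them with "_", and prepend the "ES" prefix.
func envName(field string) string {
	return "ES_" + strings.ToUpper(strings.Join(camelcase.Split(field), "_"))
}

func main() {
	for _, f := range []string{"HTTPAddr", "GRPCAddr", "AuthToken", "EnableGRPC"} {
		fmt.Printf("%s -> %s\n", f, envName(f)) // e.g. EnableGRPC -> ES_ENABLE_GRPC
	}
}
```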
### Build Shared object
@ -191,6 +192,7 @@ Transfer/sec: 124.38KB
- [x] GRPC + protobuf support
- [x] Token-based authentication
- [x] Prometheus metrics support
- [ ] Customize container work dir
## API interface

View File

@ -5,10 +5,16 @@ import (
"runtime"
"github.com/criyle/go-judge/cmd/executorserver/model"
"github.com/criyle/go-judge/worker"
"github.com/gin-gonic/gin"
)
func handleRun(c *gin.Context) {
type cmdHandle struct {
worker worker.Worker
srcPrefix string
}
func (h *cmdHandle) handleRun(c *gin.Context) {
var req model.Request
if err := c.ShouldBindJSON(&req); err != nil {
c.Error(err)
@ -20,13 +26,15 @@ func handleRun(c *gin.Context) {
c.AbortWithStatusJSON(http.StatusBadRequest, "no cmd provided")
return
}
r, err := model.ConvertRequest(&req, *srcPrefix)
r, err := model.ConvertRequest(&req, h.srcPrefix)
if err != nil {
c.Error(err)
c.AbortWithStatusJSON(http.StatusBadRequest, err.Error())
return
}
rt := <-work.Submit(c.Request.Context(), r)
logger.Sugar().Debugf("request: %+v", r)
rt := <-h.worker.Submit(c.Request.Context(), r)
logger.Sugar().Debugf("response: %+v", rt)
execObserve(rt)
if rt.Error != nil {
c.Error(rt.Error)
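
This hunk shows the commit's main refactoring move: `handleRun` stops reading package-level globals (`*srcPrefix`, `work`) and becomes a method on `cmdHandle`, whose dependencies are injected where the route is registered. A self-contained sketch of the same pattern, with hypothetical names:

```go
package main

import (
	"net/http"

	"github.com/gin-gonic/gin"
)

// greetHandle carries the handler's dependencies, analogous to cmdHandle
// carrying worker and srcPrefix above.
type greetHandle struct {
	prefix string
}

func (h *greetHandle) handleGreet(c *gin.Context) {
	c.String(http.StatusOK, "%s world", h.prefix)
}

func main() {
	r := gin.New()
	h := &greetHandle{prefix: "hello"} // dependencies injected at registration
	r.GET("/greet", h.handleGreet)
	r.Run(":8080")
}
```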

View File

@ -0,0 +1,60 @@
package config
import (
"os"
"time"
"github.com/criyle/go-judge/pkg/envexec"
"github.com/koding/multiconfig"
)
// Config defines executor server configuration
type Config struct {
// container
ContainerInitPath string `flagUsage:"container init path"`
PreFork int `flagUsage:"control # of the prefork workers"`
TmpFsParam string `flagUsage:"tmpfs mount data (only for default mount with no mount.yaml)" default:"size=16m,nr_inodes=4k"`
NetShare bool `flagUsage:"share net namespace with host"`
MountConf string `flagUsage:"specifies mount configuration file" default:"mount.yaml"`
Parallelism int `flagUsage:"control the # of concurrency execution" default:"4"`
CgroupPrefix string `flagUsage:"control cgroup prefix" default:"executor_server"`
// file store
SrcPrefix string `flagUsage:"specifies directory prefix for source type copyin"`
Dir string `flagUsage:"specifies directory to store file upload / download (in memory by default)"`
// runner limit
TimeLimitCheckerInterval time.Duration `flagUsage:"specifies time limit checker interval" default:"100ms"`
ExtraMemoryLimit envexec.Size `flagUsage:"specifies extra memory buffer for check memory limit (default: 16k)"`
// server config
HTTPAddr string `flagUsage:"specifies the http binding address" default:":5050"`
EnableGRPC bool `flagUsage:"enable gRPC endpoint"`
GRPCAddr string `flagUsage:"specifies the grpc binding address" default:":5051"`
AuthToken string `flagUsage:"bearer token auth for REST / gRPC"`
EnableDebug bool `flagUsage:"enable debug endpoint"`
EnableMetrics bool `flagUsage:"enable prometheus metrics endpoint"`
// logger config
Release bool `flagUsage:"release level of logs"`
Silent bool `flagUsage:"do not print logs"`
}
// Load loads config from flag & environment variables
func (c *Config) Load() error {
cl := multiconfig.MultiLoader(
&multiconfig.TagLoader{},
&multiconfig.EnvironmentLoader{
Prefix: "ES",
CamelCase: true,
},
&multiconfig.FlagLoader{
CamelCase: true,
EnvPrefix: "ES",
},
)
if os.Getpid() == 1 {
c.Release = true
}
return cl.Load(c)
}
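
`Load` stacks three loaders; later loaders override earlier ones, so the effective precedence is struct-tag defaults, then `ES_*` environment variables, then command-line flags, matching the README behavior above. The `os.Getpid() == 1` check turns on release logging when the server runs as a container's init process. A trimmed, runnable sketch of the same loader stack over a hypothetical two-field struct:

```go
package main

import (
	"fmt"

	"github.com/koding/multiconfig"
)

type demo struct {
	HTTPAddr    string `default:":5050"`
	Parallelism int    `default:"4"`
}

func main() {
	cl := multiconfig.MultiLoader(
		&multiconfig.TagLoader{},                                      // 1. `default:"..."` tags
		&multiconfig.EnvironmentLoader{Prefix: "ES", CamelCase: true}, // 2. ES_HTTP_ADDR, ES_PARALLELISM
		&multiconfig.FlagLoader{CamelCase: true, EnvPrefix: "ES"},     // 3. -http-addr, -parallelism
	)
	var d demo
	if err := cl.Load(&d); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", d) // with nothing set: {HTTPAddr::5050 Parallelism:4}
}
```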

View File

@ -24,6 +24,7 @@ var buffPool = sync.Pool{
type execServer struct {
pb.UnimplementedExecutorServer
worker worker.Worker
fs filestore.FileStore
srcPrefix string
}
@ -36,7 +37,7 @@ func (e *execServer) Exec(ctx context.Context, req *pb.Request) (*pb.Response, e
if len(si) > 0 || len(so) > 0 {
return nil, fmt.Errorf("Stream in / out are not available for exec request")
}
rt := <-work.Submit(ctx, r)
rt := <-e.worker.Submit(ctx, r)
execObserve(rt)
if rt.Error != nil {
return nil, rt.Error

View File

@ -56,7 +56,7 @@ func (e *execServer) ExecStream(es pb.Executor_ExecStreamServer) error {
}
}
rtCh := work.Execute(es.Context(), rq)
rtCh := e.worker.Execute(es.Context(), rq)
for {
select {
case err := <-errCh:

View File

@ -10,10 +10,10 @@ import (
"net/http"
"os"
"os/signal"
"strconv"
"strings"
"time"
"github.com/criyle/go-judge/cmd/executorserver/config"
"github.com/criyle/go-judge/env"
"github.com/criyle/go-judge/filestore"
"github.com/criyle/go-judge/pb"
@ -37,96 +37,20 @@ import (
"google.golang.org/grpc/status"
)
const (
envDebug = "DEBUG"
envMetrics = "METRICS"
envAddr = "HTTP_ADDR"
envGRPC = "GRPC"
envGRPCAddr = "GRPC_ADDR"
envParallelism = "PARALLELISM"
envToken = "TOKEN"
envRelease = "RELEASE"
envPrefork = "PREFORK"
)
var (
addr = flag.String("http", ":5050", "specifies the http binding address")
grpcAddr = flag.String("grpc", ":5051", "specifies the grpc binding address")
parallelism = flag.Int("parallelism", 4, "control the # of concurrency execution")
tmpFsParam = flag.String("tmpfs", "size=16m,nr_inodes=4k", "tmpfs mount data (only for default mount with no mount.yaml)")
dir = flag.String("dir", "", "specifies directory to store file upload / download (in memory by default)")
silent = flag.Bool("silent", false, "do not print logs")
netShare = flag.Bool("net", false, "do not unshare net namespace with host")
mountConf = flag.String("mount", "mount.yaml", "specifics mount configuration file")
cinitPath = flag.String("cinit", "", "container init absolute path")
token = flag.String("token", "", "bearer token auth for REST / gRPC")
release = flag.Bool("release", false, "use release mode for log")
srcPrefix = flag.String("srcprefix", "", "specifies directory prefix for source type copyin")
prefork = flag.Int("prefork", 0, "control # of the prefork workers")
printLog = func(v ...interface{}) {}
work *worker.Worker
)
func newFilsStore(dir string) filestore.FileStore {
var fs filestore.FileStore
if dir == "" {
fs = filestore.NewFileMemoryStore()
} else {
os.MkdirAll(dir, 0755)
fs = filestore.NewFileLocalStore(dir)
}
return fs
}
func initEnv() (bool, error) {
eneableGRPC := false
if s := os.Getenv(envAddr); s != "" {
addr = &s
}
if os.Getenv(envGRPC) == "1" {
eneableGRPC = true
}
if s := os.Getenv(envGRPCAddr); s != "" {
eneableGRPC = true
grpcAddr = &s
}
if s := os.Getenv(envParallelism); s != "" {
p, err := strconv.Atoi(s)
if err != nil {
return false, err
}
parallelism = &p
}
if s := os.Getenv(envToken); s != "" {
token = &s
}
if s := os.Getenv(envPrefork); s != "" {
p, err := strconv.Atoi(s)
if err != nil {
return false, err
}
prefork = &p
}
if os.Getpid() == 1 || os.Getenv(envRelease) == "1" {
*release = true
}
return eneableGRPC, nil
}
var logger *zap.Logger
func main() {
flag.Parse()
enableGRPC, err := initEnv()
if err != nil {
log.Fatalln("init environment variable failed", err)
var conf config.Config
if err := conf.Load(); err != nil {
if err == flag.ErrHelp {
return
}
log.Fatalln("load config failed", err)
}
var logger *zap.Logger
if !*silent {
if *release {
if !conf.Silent {
var err error
if conf.Release {
logger, err = zap.NewProduction()
} else {
config := zap.NewDevelopmentConfig()
@ -137,23 +61,30 @@ func main() {
log.Fatalln("init logger failed", err)
}
defer logger.Sync()
printLog = logger.Sugar().Info
} else {
logger = zap.NewNop()
}
logger.Sugar().Infof("config loaded: %+v", conf)
// Init environment pool
fs := newFilsStore(*dir)
b, err := env.NewBuilder(*cinitPath, *mountConf, *tmpFsParam, *netShare, printLog)
fs := newFilsStore(conf.Dir)
b, err := env.NewBuilder(env.Config{
ContainerInitPath: conf.ContainerInitPath,
MountConf: conf.MountConf,
TmpFsParam: conf.TmpFsParam,
NetShare: conf.NetShare,
CgroupPrefix: conf.CgroupPrefix,
Logger: logger.Sugar(),
})
if err != nil {
log.Fatalln("create environment builder failed", err)
}
envPool := pool.NewPool(b)
if *prefork > 0 {
printLog("create ", *prefork, " prefork containers")
m := make([]envexec.Environment, 0, *prefork)
for i := 0; i < *prefork; i++ {
if conf.PreFork > 0 {
logger.Sugar().Info("create ", conf.PreFork, " prefork containers")
m := make([]envexec.Environment, 0, conf.PreFork)
for i := 0; i < conf.PreFork; i++ {
e, err := envPool.Get()
if err != nil {
log.Fatalln("prefork environment failed", err)
@ -164,16 +95,23 @@ func main() {
envPool.Put(e)
}
}
work = worker.New(fs, envPool, *parallelism, *dir)
work := worker.New(worker.Config{
FileStore: fs,
EnvironmentPool: envPool,
Parallelism: conf.Parallelism,
WorkDir: conf.Dir,
TimeLimitTickInterval: conf.TimeLimitCheckerInterval,
})
work.Start()
printLog("Starting worker with parallelism=", *parallelism)
logger.Sugar().Infof("Starting worker with parallelism=%d, workdir=%s, timeLimitCheckInterval=%v",
conf.Parallelism, conf.Dir, conf.TimeLimitCheckerInterval)
var r *gin.Engine
if *release {
if conf.Release {
gin.SetMode(gin.ReleaseMode)
}
r = gin.New()
if *silent {
if conf.Silent {
r.Use(gin.Recovery())
} else {
r.Use(ginzap.Ginzap(logger, time.RFC3339, true))
@ -181,7 +119,7 @@ func main() {
}
// Metrics Handle
if os.Getenv(envMetrics) == "1" {
if conf.EnableMetrics {
p := ginprometheus.NewPrometheus("gin")
p.ReqCntURLLabelMappingFn = func(c *gin.Context) string {
url := c.Request.URL.Path
@ -199,9 +137,9 @@ func main() {
r.GET("/version", handleVersion)
// Add auth token
if *token != "" {
r.Use(tokenAuth(*token))
printLog("Attach token auth with token:", *token)
if conf.AuthToken != "" {
r.Use(tokenAuth(conf.AuthToken))
logger.Sugar().Info("Attach token auth with token:", conf.AuthToken)
}
// File Handles
@ -212,19 +150,21 @@ func main() {
r.DELETE("/file/:fid", fh.fileIDDelete)
// Run Handle
r.POST("/run", handleRun)
rh := &cmdHandle{worker: work, srcPrefix: conf.SrcPrefix}
r.POST("/run", rh.handleRun)
// WebSocket Handle
r.GET("/ws", handleWS)
wh := &wsHandle{worker: work, srcPrefix: conf.SrcPrefix}
r.GET("/ws", wh.handleWS)
// pprof
if os.Getenv(envDebug) != "" {
if conf.EnableDebug {
ginpprof.Register(r)
}
// gRPC server
var grpcServer *grpc.Server
if enableGRPC {
if conf.EnableGRPC {
grpc_zap.ReplaceGrpcLoggerV2(logger)
streamMiddleware := []grpc.StreamServerInterceptor{
grpc_prometheus.StreamServerInterceptor,
@ -236,8 +176,8 @@ func main() {
grpc_zap.UnaryServerInterceptor(logger),
grpc_recovery.UnaryServerInterceptor(),
}
if *token != "" {
authFunc := grpcTokenAuth(*token)
if conf.AuthToken != "" {
authFunc := grpcTokenAuth(conf.AuthToken)
streamMiddleware = append(streamMiddleware, grpc_auth.StreamServerInterceptor(authFunc))
unaryMiddleware = append(unaryMiddleware, grpc_auth.UnaryServerInterceptor(authFunc))
}
@ -245,28 +185,32 @@ func main() {
grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(streamMiddleware...)),
grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(unaryMiddleware...)),
)
pb.RegisterExecutorServer(grpcServer, &execServer{fs: fs, srcPrefix: *srcPrefix})
pb.RegisterExecutorServer(grpcServer, &execServer{
fs: fs,
worker: work,
srcPrefix: conf.SrcPrefix,
})
grpc_prometheus.Register(grpcServer)
grpc_prometheus.EnableHandlingTimeHistogram()
lis, err := net.Listen("tcp", *grpcAddr)
lis, err := net.Listen("tcp", conf.GRPCAddr)
if err != nil {
log.Fatalln(err)
}
go func() {
printLog("Starting grpc server at", *grpcAddr)
printLog("GRPC serve", grpcServer.Serve(lis))
logger.Sugar().Info("Starting gRPC server at ", conf.GRPCAddr)
logger.Sugar().Info("gRPC serve finished: ", grpcServer.Serve(lis))
}()
}
srv := http.Server{
Addr: *addr,
Addr: conf.HTTPAddr,
Handler: r,
}
go func() {
printLog("Starting http server at", *addr)
printLog("Http serve", srv.ListenAndServe())
logger.Sugar().Info("Starting http server at ", conf.HTTPAddr)
logger.Sugar().Info("Http serve finished: ", srv.ListenAndServe())
}()
// Graceful shutdown...
@ -274,33 +218,33 @@ func main() {
signal.Notify(sig, os.Interrupt)
<-sig
printLog("Shutting Down...")
logger.Sugar().Info("Shutting Down...")
ctx, cancel := context.WithTimeout(context.TODO(), time.Second*3)
defer cancel()
var eg errgroup.Group
eg.Go(func() error {
printLog("Http server shutdown")
logger.Sugar().Info("Http server shutdown")
return srv.Shutdown(ctx)
})
eg.Go(func() error {
work.Shutdown()
printLog("Worker shutdown")
logger.Sugar().Info("Worker shutdown")
return nil
})
if grpcServer != nil {
eg.Go(func() error {
grpcServer.GracefulStop()
printLog("GRPC server shutdown")
logger.Sugar().Info("GRPC server shutdown")
return nil
})
}
go func() {
printLog("Shutdown Finished", eg.Wait())
logger.Sugar().Info("Shutdown Finished: ", eg.Wait())
cancel()
}()
<-ctx.Done()
@ -330,3 +274,14 @@ func grpcTokenAuth(token string) func(context.Context) (context.Context, error)
return ctx, nil
}
}
func newFilsStore(dir string) filestore.FileStore {
var fs filestore.FileStore
if dir == "" {
fs = filestore.NewFileMemoryStore()
} else {
os.MkdirAll(dir, 0755)
fs = filestore.NewFileLocalStore(dir)
}
return fs
}
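
The shutdown sequence in `main` above is the standard Go pattern: block on SIGINT, then stop the HTTP server, the worker, and the gRPC server concurrently under a shared 3-second deadline. A runnable reduction covering just the HTTP server (same timeout as in the diff):

```go
package main

import (
	"context"
	"log"
	"net/http"
	"os"
	"os/signal"
	"time"

	"golang.org/x/sync/errgroup"
)

func main() {
	srv := http.Server{Addr: ":5050"}
	go func() { _ = srv.ListenAndServe() }()

	sig := make(chan os.Signal, 1)
	signal.Notify(sig, os.Interrupt)
	<-sig // block until Ctrl-C

	ctx, cancel := context.WithTimeout(context.TODO(), time.Second*3)
	defer cancel()

	var eg errgroup.Group
	eg.Go(func() error { return srv.Shutdown(ctx) }) // drain in-flight requests
	if err := eg.Wait(); err != nil {
		log.Println("shutdown:", err)
	}
}
```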

View File

@ -6,6 +6,7 @@ import (
"time"
"github.com/criyle/go-judge/cmd/executorserver/model"
"github.com/criyle/go-judge/worker"
"github.com/gin-gonic/gin"
"github.com/gorilla/websocket"
)
@ -22,7 +23,12 @@ const (
pingPeriod = 50 * time.Second
)
func handleWS(c *gin.Context) {
type wsHandle struct {
worker worker.Worker
srcPrefix string
}
func (h *wsHandle) handleWS(c *gin.Context) {
conn, err := upgrader.Upgrade(c.Writer, c.Request, nil)
if err != nil {
c.Error(err)
@ -45,16 +51,16 @@ func handleWS(c *gin.Context) {
for {
req := new(model.Request)
if err := conn.ReadJSON(req); err != nil {
printLog("ws read:", err)
logger.Sugar().Warn("ws read error:", err)
return
}
r, err := model.ConvertRequest(req, *srcPrefix)
r, err := model.ConvertRequest(req, h.srcPrefix)
if err != nil {
printLog("convert", err)
logger.Sugar().Warn("convert error: ", err)
return
}
go func() {
ret := <-work.Submit(ctx, r)
ret := <-h.worker.Submit(ctx, r)
execObserve(ret)
resultCh <- model.ConvertResponse(ret)
}()
@ -71,7 +77,7 @@ func handleWS(c *gin.Context) {
case r := <-resultCh:
conn.SetWriteDeadline(time.Now().Add(writeWait))
if err := conn.WriteJSON(r); err != nil {
printLog("ws write:", err)
logger.Sugar().Warn("ws write error:", err)
return
}
case <-ticker.C:

View File

@ -8,6 +8,7 @@ import (
"encoding/json"
"log"
"os"
"time"
"github.com/criyle/go-judge/cmd/executorserver/model"
"github.com/criyle/go-judge/env"
@ -17,18 +18,19 @@ import (
)
type initParameter struct {
CInitPath string `json:"cinitPath`
Parallelism int `json:"parallelism"`
TmpFsParam string `json:"tmpfsParam"`
Dir string `json:"dir"`
NetShare bool `json:"netShare"`
MountConf string `json:"mountConf"`
SrcPrefix string `json:"srcPrefix"`
CInitPath string `json:"cinitPath"`
Parallelism int `json:"parallelism"`
TmpFsParam string `json:"tmpfsParam"`
Dir string `json:"dir"`
NetShare bool `json:"netShare"`
MountConf string `json:"mountConf"`
SrcPrefix string `json:"srcPrefix"`
CgroupPrefix string `json:"cgroupPrefix"`
}
var (
fs filestore.FileStore
work *worker.Worker
work worker.Worker
srcPrefix string
)
@ -69,13 +71,25 @@ func Init(i *C.char) C.int {
fs = newFilsStore(ip.Dir)
printLog := func(v ...interface{}) {}
b, err := env.NewBuilder(ip.CInitPath, ip.MountConf, ip.TmpFsParam, ip.NetShare, printLog)
b, err := env.NewBuilder(env.Config{
ContainerInitPath: ip.CInitPath,
MountConf: ip.MountConf,
TmpFsParam: ip.TmpFsParam,
NetShare: ip.NetShare,
CgroupPrefix: ip.CgroupPrefix,
Logger: nopLogger{},
})
if err != nil {
log.Fatalln("create environment builder failed", err)
}
envPool := pool.NewPool(b)
work = worker.New(fs, envPool, ip.Parallelism, ip.Dir)
work = worker.New(worker.Config{
FileStore: fs,
EnvironmentPool: envPool,
Parallelism: ip.Parallelism,
WorkDir: ip.Dir,
TimeLimitTickInterval: 100 * time.Millisecond,
})
work.Start()
return 0
@ -176,3 +190,18 @@ func FileDelete(e *C.char) *C.char {
}
return C.CString("")
}
type nopLogger struct {
}
func (nopLogger) Debug(args ...interface{}) {
}
func (nopLogger) Info(args ...interface{}) {
}
func (nopLogger) Warn(args ...interface{}) {
}
func (nopLogger) Error(args ...interface{}) {
}

19
env/config.go vendored Normal file
View File

@ -0,0 +1,19 @@
package env
// Logger defines logger to print logs
type Logger interface {
Debug(args ...interface{})
Info(args ...interface{})
Warn(args ...interface{})
Error(args ...interface{})
}
// Config defines parameters to create environment builder
type Config struct {
ContainerInitPath string
TmpFsParam string
NetShare bool
MountConf string
CgroupPrefix string
Logger
}
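
Because `Config` embeds `Logger`, builder code can call `c.Info(...)` directly, as the platform-specific `NewBuilder` implementations below do, and any type with these four methods plugs in: zap's `SugaredLogger` in the server, the `nopLogger` in the shared object. A self-contained illustration (the `printLogger` here is hypothetical):

```go
package main

import "fmt"

type Logger interface {
	Debug(args ...interface{})
	Info(args ...interface{})
	Warn(args ...interface{})
	Error(args ...interface{})
}

type printLogger struct{}

func (printLogger) Debug(args ...interface{}) { fmt.Println(append([]interface{}{"D:"}, args...)...) }
func (printLogger) Info(args ...interface{})  { fmt.Println(append([]interface{}{"I:"}, args...)...) }
func (printLogger) Warn(args ...interface{})  { fmt.Println(append([]interface{}{"W:"}, args...)...) }
func (printLogger) Error(args ...interface{}) { fmt.Println(append([]interface{}{"E:"}, args...)...) }

// Config embeds Logger, so c.Info(...) resolves to the embedded implementation.
type Config struct {
	Name string
	Logger
}

func main() {
	c := Config{Name: "demo", Logger: printLogger{}}
	c.Info("created builder for", c.Name) // prints: I: created builder for demo
}
```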

6
env/env_darwin.go vendored
View File

@ -16,8 +16,8 @@ var defaultWrite = []string{
}
// NewBuilder builds an environment builder
func NewBuilder(cinitPath, mountConf, tmpFsConf string, netShare bool, printLog func(v ...interface{})) (pool.EnvBuilder, error) {
b := macsandbox.NewBuilder("", defaultRead, defaultWrite, netShare)
printLog("created mac sandbox at", "")
func NewBuilder(c Config) (pool.EnvBuilder, error) {
b := macsandbox.NewBuilder("", defaultRead, defaultWrite, c.NetShare)
c.Info("created mac sandbox at", "")
return b, nil
}

24
env/env_linux.go vendored
View File

@ -13,26 +13,26 @@ import (
)
// NewBuilder builds an environment builder
func NewBuilder(cinitPath, mountConf, tmpFsConf string, netShare bool, printLog func(v ...interface{})) (pool.EnvBuilder, error) {
func NewBuilder(c Config) (pool.EnvBuilder, error) {
root, err := ioutil.TempDir("", "executorserver")
if err != nil {
return nil, err
}
printLog("Created tmp dir for container root at:", root)
c.Info("Created tmp dir for container root at:", root)
mb, err := parseMountConfig(mountConf)
mb, err := parseMountConfig(c.MountConf)
if err != nil {
if !os.IsNotExist(err) {
return nil, err
}
printLog("Use the default container mount")
mb = getDefaultMount(tmpFsConf)
c.Info("Use the default container mount")
mb = getDefaultMount(c.TmpFsParam)
}
m := mb.FilterNotExist().Mounts
printLog("Created container mount at:", mb)
c.Info("Created container mount at:", mb)
unshareFlags := uintptr(forkexec.UnshareFlags)
if netShare {
if c.NetShare {
unshareFlags ^= syscall.CLONE_NEWNET
}
@ -48,16 +48,16 @@ func NewBuilder(cinitPath, mountConf, tmpFsConf string, netShare bool, printLog
CredGenerator: credGen,
Stderr: os.Stderr,
CloneFlags: unshareFlags,
ExecFile: cinitPath,
ExecFile: c.ContainerInitPath,
}
cgb, err := cgroup.NewBuilder("executorserver").WithCPUAcct().WithMemory().WithPids().FilterByEnv()
cgb, err := cgroup.NewBuilder(c.CgroupPrefix).WithCPUAcct().WithMemory().WithPids().FilterByEnv()
if err != nil {
return nil, err
}
printLog("Created cgroup builder with:", cgb)
c.Info("Test created cgroup builder with:", cgb)
if cg, err := cgb.Build(); err != nil {
printLog("Tested created cgroup with error", err)
printLog("Failed back to rlimit / rusage mode")
c.Warn("Tested created cgroup with error", err)
c.Warn("Failed back to rlimit / rusage mode")
cgb = nil
} else {
cg.Destroy()

2
env/env_others.go vendored
View File

@ -9,6 +9,6 @@ import (
"github.com/criyle/go-judge/pkg/pool"
)
func NewBuilder(cinitPath, mountConf, tmpFsConf string, netShare bool, printLog func(v ...interface{})) (pool.EnvBuilder, error) {
func NewBuilder(c Config) (pool.EnvBuilder, error) {
return nil, errors.New("environment is not supported on this platform: " + runtime.GOOS)
}

4
env/env_windows.go vendored
View File

@ -6,11 +6,11 @@ import (
)
// NewBuilder builds an environment builder
func NewBuilder(cinitPath, mountConf, tmpFsConf string, netShare bool, printLog func(v ...interface{})) (pool.EnvBuilder, error) {
func NewBuilder(c Config) (pool.EnvBuilder, error) {
b, err := winc.NewBuilder("")
if err != nil {
return nil, err
}
printLog("created winc builder")
c.Info("created winc builder")
return b, nil
}

3
go.mod
View File

@ -6,6 +6,8 @@ require (
cloud.google.com/go v0.72.0 // indirect
github.com/creack/pty v1.1.11
github.com/criyle/go-sandbox v0.5.1
github.com/fatih/camelcase v1.0.0 // indirect
github.com/fatih/structs v1.1.0 // indirect
github.com/gin-contrib/pprof v1.3.0
github.com/gin-contrib/zap v0.0.1
github.com/gin-gonic/gin v1.6.3
@ -13,6 +15,7 @@ require (
github.com/gorilla/websocket v1.4.2
github.com/grpc-ecosystem/go-grpc-middleware v1.2.2
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
github.com/koding/multiconfig v0.0.0-20171124222453-69c27309b2d7
github.com/prometheus/client_golang v1.8.0
github.com/zsais/go-gin-prometheus v0.1.0
go.uber.org/zap v1.16.0

6
go.sum
View File

@ -99,7 +99,11 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8=
github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
@ -256,6 +260,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/koding/multiconfig v0.0.0-20171124222453-69c27309b2d7 h1:SWlt7BoQNASbhTUD0Oy5yysI2seJ7vWuGUp///OM4TM=
github.com/koding/multiconfig v0.0.0-20171124222453-69c27309b2d7/go.mod h1:Y2SaZf2Rzd0pXkLVhLlCiAXFCLSXAIbTKDivVgff/AM=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=

View File

@ -8,6 +8,12 @@ import (
"github.com/criyle/go-sandbox/runner"
)
// Size represents data size in bytes
type Size = runner.Size
// RunnerResult represents the process finish result
type RunnerResult = runner.Result
// Cmd defines instruction to run a program in container environment
type Cmd struct {
// argument, environment
@ -23,17 +29,18 @@ type Cmd struct {
TTY bool // use pty as input / output
// resource limits
TimeLimit time.Duration
MemoryLimit runner.Size
StackLimit runner.Size
ProcLimit uint64
TimeLimit time.Duration
MemoryLimit Size
StackLimit Size
ExtraMemoryLimit Size
ProcLimit uint64
// file contents to copyin before exec
CopyIn map[string]file.File
// file names to copyout after exec
CopyOut []string
CopyOutMax runner.Size // file size limit
CopyOutMax Size // file size limit
// CopyOutDir specifies a dir to dump all /w content
CopyOutDir string
@ -60,7 +67,7 @@ type Result struct {
Time time.Duration
RunTime time.Duration
Memory runner.Size // byte
Memory Size // byte
// Files stores copy out files
Files map[string]file.File
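
`Size` and `RunnerResult` are declared as type aliases, not new named types: `envexec.Size` and `runner.Size` are the same type, so packages like `worker` and `pool` can drop their direct `go-sandbox` imports (as the later hunks do) without inserting any conversions. A minimal illustration with stand-in types:

```go
package main

import "fmt"

// bytes stands in for runner.Size.
type bytes uint64

// Size is a type alias (note the "="): Size and bytes are identical types,
// which is what lets values cross package boundaries with no conversion.
type Size = bytes

func main() {
	var s Size = 16 << 10
	var b bytes = s // legal without a conversion
	fmt.Println(b)  // 16384
}
```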

View File

@ -1,5 +1,5 @@
package envexec
const (
memoryLimitExtra uint64 = 16 << 10 // 16k more memory
defaultExtraMemoryLimit = Size(16 << 10) // 16k more memory
)

View File

@ -4,8 +4,6 @@ import (
"context"
"os"
"time"
"github.com/criyle/go-sandbox/runner"
)
// ExecveParam is parameters to run process inside environment
@ -32,21 +30,21 @@ type ExecveParam struct {
// Limit defines the process running resource limits
type Limit struct {
Time time.Duration // Time limit
Memory runner.Size // Memory limit
Memory Size // Memory limit
Proc uint64 // Process count limit
Stack runner.Size // Stack limit
Stack Size // Stack limit
}
// Usage defines the peak process resource usage
type Usage struct {
Time time.Duration
Memory runner.Size
Memory Size
}
// Process reference to the running process group
type Process interface {
Done() <-chan struct{} // Done returns a channel for wait process to exit
Result() runner.Result // Result is available after done is closed
Result() RunnerResult // Result is available after done is closed
Usage() Usage // Usage retrieves the process usage during the run time
}

View File

@ -21,11 +21,16 @@ func runSingle(pc context.Context, m Environment, c *Cmd, fds []*os.File, ptc []
}
}
memoryLimit := c.MemoryLimit + runner.Size(memoryLimitExtra)
extraMemoryLimit := c.ExtraMemoryLimit
if extraMemoryLimit == 0 {
extraMemoryLimit = defaultExtraMemoryLimit
}
var stackLimit runner.Size
memoryLimit := c.MemoryLimit + extraMemoryLimit
var stackLimit Size
if c.StackLimit > 0 {
stackLimit = c.StackLimit + runner.Size(memoryLimitExtra)
stackLimit = c.StackLimit + extraMemoryLimit
}
if stackLimit > memoryLimit {
stackLimit = memoryLimit

View File

@ -3,8 +3,8 @@ package pool
import (
"time"
"github.com/criyle/go-judge/pkg/envexec"
"github.com/criyle/go-sandbox/pkg/cgroup"
"github.com/criyle/go-sandbox/runner"
)
var (
@ -13,7 +13,7 @@ var (
type wCgroup cgroup.Cgroup
func (c *wCgroup) SetMemoryLimit(s runner.Size) error {
func (c *wCgroup) SetMemoryLimit(s envexec.Size) error {
return (*cgroup.Cgroup)(c).SetMemoryLimitInBytes(uint64(s))
}
@ -26,18 +26,18 @@ func (c *wCgroup) CPUUsage() (time.Duration, error) {
return time.Duration(t), err
}
func (c *wCgroup) MemoryUsage() (runner.Size, error) {
func (c *wCgroup) MemoryUsage() (envexec.Size, error) {
s, err := (*cgroup.Cgroup)(c).MemoryMaxUsageInBytes()
if err != nil {
return 0, err
}
return runner.Size(s), nil
return envexec.Size(s), nil
// not really useful if creates new
// cache, err := (*cgroup.CGroup)(c).FindMemoryStatProperty("cache")
// if err != nil {
// return 0, err
// }
// return runner.Size(s - cache), err
// return envexec.Size(s - cache), err
}
func (c *wCgroup) AddProc(pid int) error {

View File

@ -4,16 +4,16 @@ import (
"sync"
"time"
"github.com/criyle/go-sandbox/runner"
"github.com/criyle/go-judge/pkg/envexec"
)
// Cgroup defines interface to limit and monitor resources consumption of a process
type Cgroup interface {
SetMemoryLimit(runner.Size) error
SetMemoryLimit(envexec.Size) error
SetProcLimit(uint64) error
CPUUsage() (time.Duration, error)
MemoryUsage() (runner.Size, error)
MemoryUsage() (envexec.Size, error)
AddProc(int) error
Reset() error

View File

@ -43,7 +43,7 @@ func (p *process) Done() <-chan struct{} {
return p.done
}
func (p *process) Result() runner.Result {
func (p *process) Result() envexec.RunnerResult {
<-p.done
return p.rt
}
@ -51,7 +51,7 @@ func (p *process) Result() runner.Result {
func (p *process) Usage() envexec.Usage {
var (
t time.Duration
m runner.Size
m envexec.Size
)
if p.cg != nil {
t, _ = p.cg.CPUUsage()

View File

@ -5,7 +5,6 @@ import (
"unsafe"
"github.com/criyle/go-judge/pkg/envexec"
"github.com/criyle/go-sandbox/runner"
"golang.org/x/sys/windows"
)
@ -13,7 +12,7 @@ var _ envexec.Process = &process{}
type process struct {
done chan struct{}
result runner.Result
result envexec.RunnerResult
hJob windows.Handle
hProcess windows.Handle
}
@ -22,7 +21,7 @@ func (p *process) Done() <-chan struct{} {
return p.done
}
func (p *process) Result() runner.Result {
func (p *process) Result() envexec.RunnerResult {
<-p.done
return p.result
}
@ -35,7 +34,7 @@ func (p *process) Usage() envexec.Usage {
}
}
func getJobOjbectUsage(hJob windows.Handle) (time.Duration, runner.Size, error) {
func getJobOjbectUsage(hJob windows.Handle) (time.Duration, envexec.Size, error) {
basicInfo := new(JOBOBJECT_BASIC_ACCOUNTING_INFORMATION)
if _, err := QueryInformationJobObject(hJob, JobObjectBasicAccountingInformation,
uintptr(unsafe.Pointer(basicInfo)), uint32(unsafe.Sizeof(*basicInfo)), nil); err != nil {
@ -48,6 +47,6 @@ func getJobOjbectUsage(hJob windows.Handle) (time.Duration, runner.Size, error)
uintptr(unsafe.Pointer(extendedLimitInfo)), uint32(unsafe.Sizeof(*extendedLimitInfo)), nil); err != nil {
return 0, 0, err
}
m := runner.Size(extendedLimitInfo.PeakJobMemoryUsed)
m := envexec.Size(extendedLimitInfo.PeakJobMemoryUsed)
return t, m, nil
}

View File

@ -7,9 +7,11 @@ import (
"github.com/criyle/go-judge/pkg/envexec"
)
const tickInterval = time.Second
// default tick interval 100 ms
const defaultTickInterval = 100 * time.Millisecond
type waiter struct {
tickInterval time.Duration
timeLimit time.Duration
realTimeLimit time.Duration
}
@ -21,6 +23,11 @@ func (w *waiter) Wait(ctx context.Context, u envexec.Process) bool {
start := time.Now()
tickInterval := w.tickInterval
if tickInterval == 0 {
tickInterval = defaultTickInterval
}
ticker := time.NewTicker(tickInterval)
defer ticker.Stop()

View File

@ -10,18 +10,36 @@ import (
"github.com/criyle/go-judge/file"
"github.com/criyle/go-judge/filestore"
"github.com/criyle/go-judge/pkg/envexec"
"github.com/criyle/go-sandbox/runner"
)
const maxWaiting = 512
// Worker defines executor worker
type Worker struct {
// Config defines worker configuration
type Config struct {
FileStore filestore.FileStore
EnvironmentPool envexec.EnvironmentPool
Parallelism int
WorkDir string
TimeLimitTickInterval time.Duration
}
// Worker defines interface for executor
type Worker interface {
Start()
Submit(context.Context, *Request) <-chan Response
Execute(context.Context, *Request) <-chan Response
Shutdown()
}
// worker defines executor worker
type worker struct {
fs filestore.FileStore
envPool envexec.EnvironmentPool
parallelism int
workDir string
timeLimitTickInterval time.Duration
startOnce sync.Once
stopOnce sync.Once
wg sync.WaitGroup
@ -36,17 +54,18 @@ type workRequest struct {
}
// New creates a new worker
func New(fs filestore.FileStore, pool envexec.EnvironmentPool, parallelism int, workDir string) *Worker {
return &Worker{
fs: fs,
envPool: pool,
parallelism: parallelism,
workDir: workDir,
func New(conf Config) Worker {
return &worker{
fs: conf.FileStore,
envPool: conf.EnvironmentPool,
parallelism: conf.Parallelism,
workDir: conf.WorkDir,
timeLimitTickInterval: conf.TimeLimitTickInterval,
}
}
// Start starts worker loops with given parallelism
func (w *Worker) Start() {
func (w *worker) Start() {
w.startOnce.Do(func() {
w.workCh = make(chan workRequest, maxWaiting)
w.done = make(chan struct{})
@ -58,7 +77,7 @@ func (w *Worker) Start() {
}
// Submit submits a single request
func (w *Worker) Submit(ctx context.Context, req *Request) <-chan Response {
func (w *worker) Submit(ctx context.Context, req *Request) <-chan Response {
ch := make(chan Response, 1)
w.workCh <- workRequest{
Request: req,
@ -69,7 +88,7 @@ func (w *Worker) Submit(ctx context.Context, req *Request) <-chan Response {
}
// Execute will execute the request in a new goroutine (bypassing the parallelism limit)
func (w *Worker) Execute(ctx context.Context, req *Request) <-chan Response {
func (w *worker) Execute(ctx context.Context, req *Request) <-chan Response {
ch := make(chan Response, 1)
w.wg.Add(1)
go func() {
@ -85,14 +104,14 @@ func (w *Worker) Execute(ctx context.Context, req *Request) <-chan Response {
}
// Shutdown waits for all workers to finish
func (w *Worker) Shutdown() {
func (w *worker) Shutdown() {
w.stopOnce.Do(func() {
close(w.done)
w.wg.Wait()
})
}
func (w *Worker) loop() {
func (w *worker) loop() {
defer w.wg.Done()
for {
select {
@ -107,7 +126,7 @@ func (w *Worker) loop() {
}
}
func (w *Worker) workDoCmd(req workRequest) {
func (w *worker) workDoCmd(req workRequest) {
var rt Response
if len(req.Cmd) == 1 {
rt = w.workDoSingle(req.Context, req.Cmd[0])
@ -118,7 +137,7 @@ func (w *Worker) workDoCmd(req workRequest) {
req.resultCh <- rt
}
func (w *Worker) workDoSingle(ctx context.Context, rc Cmd) (rt Response) {
func (w *worker) workDoSingle(ctx context.Context, rc Cmd) (rt Response) {
c, copyOutSet, err := w.prepareCmd(rc)
if err != nil {
rt.Error = err
@ -138,7 +157,7 @@ func (w *Worker) workDoSingle(ctx context.Context, rc Cmd) (rt Response) {
return
}
func (w *Worker) workDoGroup(ctx context.Context, rc []Cmd, pm []PipeMap) (rt Response) {
func (w *worker) workDoGroup(ctx context.Context, rc []Cmd, pm []PipeMap) (rt Response) {
var rts []Result
p := preparePipeMapping(pm)
cs := make([]*envexec.Cmd, 0, len(rc))
@ -172,7 +191,7 @@ func (w *Worker) workDoGroup(ctx context.Context, rc []Cmd, pm []PipeMap) (rt Re
return
}
func (w *Worker) convertResult(result envexec.Result, copyOutSet map[string]bool) (res Result) {
func (w *worker) convertResult(result envexec.Result, copyOutSet map[string]bool) (res Result) {
res.Status = result.Status
res.ExitStatus = result.ExitStatus
res.Error = result.Error
@ -204,7 +223,7 @@ func (w *Worker) convertResult(result envexec.Result, copyOutSet map[string]bool
return res
}
func (w *Worker) prepareCmd(rc Cmd) (*envexec.Cmd, map[string]bool, error) {
func (w *worker) prepareCmd(rc Cmd) (*envexec.Cmd, map[string]bool, error) {
files, pipeFileName, err := w.prepareCmdFiles(rc.Files)
if err != nil {
return nil, nil, err
@ -235,6 +254,7 @@ func (w *Worker) prepareCmd(rc Cmd) (*envexec.Cmd, map[string]bool, error) {
}
wait := &waiter{
tickInterval: w.timeLimitTickInterval,
timeLimit: time.Duration(rc.CPULimit),
realTimeLimit: time.Duration(rc.RealCPULimit),
}
@ -259,13 +279,13 @@ func (w *Worker) prepareCmd(rc Cmd) (*envexec.Cmd, map[string]bool, error) {
Files: files,
TTY: rc.TTY,
TimeLimit: timeLimit,
MemoryLimit: runner.Size(rc.MemoryLimit),
StackLimit: runner.Size(rc.StackLimit),
MemoryLimit: envexec.Size(rc.MemoryLimit),
StackLimit: envexec.Size(rc.StackLimit),
ProcLimit: rc.ProcLimit,
CopyIn: copyIn,
CopyOut: copyOut,
CopyOutDir: copyOutDir,
CopyOutMax: runner.Size(rc.CopyOutMax),
CopyOutMax: envexec.Size(rc.CopyOutMax),
Waiter: wait.Wait,
}, copyOutSet, nil
}
@ -281,7 +301,7 @@ func preparePipeMapping(pm []PipeMap) []*envexec.Pipe {
return rt
}
func (w *Worker) prepareCopyIn(cf map[string]CmdFile) (map[string]file.File, error) {
func (w *worker) prepareCopyIn(cf map[string]CmdFile) (map[string]file.File, error) {
rt := make(map[string]file.File)
for name, f := range cf {
if f == nil {
@ -300,7 +320,7 @@ func (w *Worker) prepareCopyIn(cf map[string]CmdFile) (map[string]file.File, err
return rt, nil
}
func (w *Worker) prepareCmdFiles(files []CmdFile) ([]interface{}, map[string]bool, error) {
func (w *worker) prepareCmdFiles(files []CmdFile) ([]interface{}, map[string]bool, error) {
rt := make([]interface{}, 0, len(files))
pipeFileName := make(map[string]bool)
for _, f := range files {
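
Taken together, the worker is now constructed from a `Config` and hidden behind the `Worker` interface, which also makes it replaceable in tests. A hypothetical caller under the new API (assuming a fully populated `worker.Config` at this commit; the `submitOne` helper is illustrative):

```go
package main

import (
	"context"
	"time"

	"github.com/criyle/go-judge/worker"
)

// submitOne starts a worker from its config, submits one request, and waits
// for the response or a timeout.
func submitOne(conf worker.Config, req *worker.Request) worker.Response {
	w := worker.New(conf) // returns the Worker interface
	w.Start()
	defer w.Shutdown()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	return <-w.Submit(ctx, req)
}

func main() {}
```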