main: allow -src-prefix to accept a comma-separated list of paths

criyle 2023-04-14 03:46:30 -07:00
parent afb21cc2f7
commit 90de4c544c
8 changed files with 36 additions and 36 deletions

View File

@ -63,7 +63,7 @@
- By default, the number of concurrently running tasks equals the number of CPUs; specify it with `-parallelism`
- By default, files are stored in memory; use `-dir` to specify a local directory as the file store
- The default cgroup prefix is `executor_server`; specify it with `-cgroup-prefix`
- By default there is no restriction on copying files from disk; use `-src-prefix` to restrict the directory prefix for copyIn operations (must be an absolute path)
- By default there is no restriction on copying files from disk; use `-src-prefix` to restrict the directory prefixes for copyIn operations, separated by commas `,` (must be absolute paths) (example: `/bin,/usr`)
- The default time and memory usage check interval is 100 milliseconds (`100ms`); specify it with `-time-limit-checker-interval`
- The default maximum output limit is `256MiB`; specify it with `-output-limit`
- The default maximum number of open file descriptors is `256`; specify it with `-open-file-limit`

View File

@ -63,7 +63,7 @@ Sandbox:
- The default concurrency equals the number of CPUs; it can be specified with the `-parallelism` flag.
- The default file store is in memory; a local cache directory can be specified with the `-dir` flag.
- The default cgroup prefix is `executor_server`; it can be specified with the `-cgroup-prefix` flag.
- `-src-prefix` restricts the `src` path for copyIn (must be an absolute path)
- `-src-prefix` restricts the `src` paths for copyIn to a comma-separated list of prefixes (must be absolute paths) (example: `/bin,/usr`)
- `-time-limit-checker-interval` specifies the time limit checker interval (default 100ms) (valid range: \[1ms, 1s\])
- `-output-limit` specifies the size limit of the POSIX rlimit on output (default 256MiB)
- `-extra-memory-limit` specifies the additional memory limit used to detect memory limit exceeded (default 16KiB)

View File

@ -23,8 +23,8 @@ type Config struct {
ContainerCredStart int `flagUsage:"control the start uid&gid for container (0 uses unprivileged root)" default:"0"`
// file store
SrcPrefix string `flagUsage:"specifies directory prefix for source type copyin"`
Dir string `flagUsage:"specifies directory to store file upload / download (in memory by default)"`
SrcPrefix []string `flagUsage:"specifies directory prefix for source type copyin (example: -src-prefix=/home,/usr)"`
Dir string `flagUsage:"specifies directory to store file upload / download (in memory by default)"`
// runner limit
TimeLimitCheckerInterval time.Duration `flagUsage:"specifies time limit checker interval" default:"100ms"`

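The struct tags above drive the project's own flag binding, which is not shown in this diff. As a rough, stand-alone sketch of how a comma-separated flag value can populate a `[]string` field, here is a stdlib `flag.Value` implementation; the `prefixList` type and the flag wiring are illustrative assumptions, not the project's actual binder:

```go
package main

import (
	"flag"
	"fmt"
	"strings"
)

// prefixList is a hypothetical flag.Value that splits a comma-separated
// argument into a slice of path prefixes.
type prefixList []string

// String renders the list back into its comma-separated form.
func (p *prefixList) String() string { return strings.Join(*p, ",") }

// Set parses the raw flag value; an empty value leaves the list empty,
// which keeps the copyIn prefix check disabled.
func (p *prefixList) Set(v string) error {
	if v == "" {
		return nil
	}
	*p = strings.Split(v, ",")
	return nil
}

func main() {
	var srcPrefix prefixList
	flag.Var(&srcPrefix, "src-prefix", "directory prefix for source type copyin (comma separated)")
	flag.Parse()

	// e.g. -src-prefix=/bin,/usr -> [/bin /usr]
	fmt.Printf("src prefixes: %v\n", []string(srcPrefix))
}
```

Leaving the slice empty when the flag is unset mirrors the existing behavior, where an empty prefix list disables the copyIn path restriction.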
View File

@ -4,9 +4,6 @@ import (
"context"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"sync"
"time"
@ -30,7 +27,7 @@ var buffPool = sync.Pool{
}
// New creates grpc executor server
func New(worker worker.Worker, fs filestore.FileStore, srcPrefix string, logger *zap.Logger) pb.ExecutorServer {
func New(worker worker.Worker, fs filestore.FileStore, srcPrefix []string, logger *zap.Logger) pb.ExecutorServer {
return &execServer{
worker: worker,
fs: fs,
@ -43,7 +40,7 @@ type execServer struct {
pb.UnimplementedExecutorServer
worker worker.Worker
fs filestore.FileStore
srcPrefix string
srcPrefix []string
logger *zap.Logger
}
@ -169,7 +166,7 @@ func convertPBFileError(fe []envexec.FileError) []*pb.Response_FileError {
return rt
}
func convertPBRequest(r *pb.Request, srcPrefix string) (req *worker.Request, streamIn []*fileStreamIn, streamOut []*fileStreamOut, err error) {
func convertPBRequest(r *pb.Request, srcPrefix []string) (req *worker.Request, streamIn []*fileStreamIn, streamOut []*fileStreamOut, err error) {
defer func() {
if err != nil {
for _, fi := range streamIn {
@ -219,7 +216,7 @@ func convertPBPipeMap(p *pb.Request_PipeMap) worker.PipeMap {
}
}
func convertPBCmd(c *pb.Request_CmdType, srcPrefix string) (cm worker.Cmd, streamIn []*fileStreamIn, streamOut []*fileStreamOut, err error) {
func convertPBCmd(c *pb.Request_CmdType, srcPrefix []string) (cm worker.Cmd, streamIn []*fileStreamIn, streamOut []*fileStreamOut, err error) {
defer func() {
if err != nil {
for _, fi := range streamIn {
@ -284,13 +281,13 @@ func convertPBCmd(c *pb.Request_CmdType, srcPrefix string) (cm worker.Cmd, strea
return cm, streamIn, streamOut, nil
}
func convertPBFile(c *pb.Request_File, srcPrefix string) (worker.CmdFile, error) {
func convertPBFile(c *pb.Request_File, srcPrefix []string) (worker.CmdFile, error) {
switch c := c.File.(type) {
case nil:
return nil, nil
case *pb.Request_File_Local:
if srcPrefix != "" {
ok, err := checkPathPrefix(c.Local.GetSrc(), srcPrefix)
if len(srcPrefix) > 0 {
ok, err := model.CheckPathPrefixes(c.Local.GetSrc(), srcPrefix)
if err != nil {
return nil, err
}
@ -309,17 +306,6 @@ func convertPBFile(c *pb.Request_File, srcPrefix string) (worker.CmdFile, error)
return nil, fmt.Errorf("request file type not supported yet %v", c)
}
func checkPathPrefix(path, prefix string) (bool, error) {
if filepath.IsAbs(path) {
return strings.HasPrefix(filepath.Clean(path), prefix), nil
}
wd, err := os.Getwd()
if err != nil {
return false, err
}
return strings.HasPrefix(filepath.Join(wd, path), prefix), nil
}
func convertCopyOut(copyOut []*pb.Request_CmdCopyOutFile) []worker.CmdCopyOutFile {
rt := make([]worker.CmdCopyOutFile, 0, len(copyOut))
for _, n := range copyOut {

View File

@ -175,7 +175,7 @@ func ConvertResponse(r worker.Response, mmap bool) (ret Response, err error) {
}
// ConvertRequest converts json request into worker request
func ConvertRequest(r *Request, srcPrefix string) (*worker.Request, error) {
func ConvertRequest(r *Request, srcPrefix []string) (*worker.Request, error) {
req := &worker.Request{
RequestID: r.RequestID,
Cmd: make([]worker.Cmd, 0, len(r.Cmd)),
@ -238,7 +238,7 @@ func convertPipe(p PipeMap) worker.PipeMap {
}
}
func convertCmd(c Cmd, srcPrefix string) (worker.Cmd, error) {
func convertCmd(c Cmd, srcPrefix []string) (worker.Cmd, error) {
clockLimit := c.ClockLimit
if c.RealCPULimit > 0 {
clockLimit = c.RealCPULimit
@ -286,13 +286,13 @@ func convertCmd(c Cmd, srcPrefix string) (worker.Cmd, error) {
return w, nil
}
func convertCmdFile(f *CmdFile, srcPrefix string) (worker.CmdFile, error) {
func convertCmdFile(f *CmdFile, srcPrefix []string) (worker.CmdFile, error) {
switch {
case f == nil:
return nil, nil
case f.Src != nil:
if srcPrefix != "" {
ok, err := checkPathPrefix(*f.Src, srcPrefix)
if len(srcPrefix) != 0 {
ok, err := CheckPathPrefixes(*f.Src, srcPrefix)
if err != nil {
return nil, err
}
@ -312,6 +312,19 @@ func convertCmdFile(f *CmdFile, srcPrefix string) (worker.CmdFile, error) {
}
}
// CheckPathPrefixes reports whether the path falls under any of the given prefixes;
// relative paths are resolved against the current working directory before checking.
func CheckPathPrefixes(path string, prefixes []string) (bool, error) {
for _, p := range prefixes {
ok, err := checkPathPrefix(path, p)
if err != nil {
return false, err
}
if ok {
return true, nil
}
}
return false, nil
}
func checkPathPrefix(path, prefix string) (bool, error) {
if filepath.IsAbs(path) {
return strings.HasPrefix(filepath.Clean(path), prefix), nil

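To illustrate the new helper: `CheckPathPrefixes` accepts a path if it falls under any configured prefix, and relative paths are first resolved against the working directory. A minimal usage sketch follows; the import path is an assumption for illustration and may not match the repo layout:

```go
package main

import (
	"fmt"

	// Import path assumed for illustration; use the repo's actual model package.
	"github.com/criyle/go-judge/cmd/executorserver/model"
)

func main() {
	prefixes := []string{"/bin", "/usr"}

	// Absolute path under an allowed prefix -> true.
	ok, err := model.CheckPathPrefixes("/usr/share/dict/words", prefixes)
	fmt.Println(ok, err)

	// Absolute path outside every prefix -> false.
	ok, err = model.CheckPathPrefixes("/etc/passwd", prefixes)
	fmt.Println(ok, err)
}
```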
View File

@ -19,7 +19,7 @@ type Register interface {
}
// New creates new REST API handler
func New(worker worker.Worker, fs filestore.FileStore, srcPrefix string, logger *zap.Logger) Register {
func New(worker worker.Worker, fs filestore.FileStore, srcPrefix []string, logger *zap.Logger) Register {
return &handle{
worker: worker,
fileHandle: fileHandle{fs: fs},
@ -31,7 +31,7 @@ func New(worker worker.Worker, fs filestore.FileStore, srcPrefix string, logger
type handle struct {
worker worker.Worker
fileHandle
srcPrefix string
srcPrefix []string
logger *zap.Logger
}

View File

@ -20,7 +20,7 @@ type Register interface {
}
// New creates new websocket handle
func New(worker worker.Worker, srcPrefix string, logger *zap.Logger) Register {
func New(worker worker.Worker, srcPrefix []string, logger *zap.Logger) Register {
return &wsHandle{
worker: worker,
srcPrefix: srcPrefix,
@ -42,7 +42,7 @@ const (
type wsHandle struct {
worker worker.Worker
srcPrefix string
srcPrefix []string
logger *zap.Logger
}

View File

@ -10,6 +10,7 @@ import (
"log"
"os"
"runtime"
"strings"
"time"
"unsafe"
@ -38,7 +39,7 @@ var (
fs filestore.FileStore
work worker.Worker
srcPrefix string
srcPrefix []string
)
func newFilsStore(dir string) (filestore.FileStore, error) {
@ -78,7 +79,7 @@ func Init(i *C.char) C.int {
ip.MountConf = "mount.yaml"
}
srcPrefix = ip.SrcPrefix
srcPrefix = strings.Split(ip.SrcPrefix, ",")
var err error
fs, err = newFilsStore(ip.Dir)
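One detail about the split above: in Go, `strings.Split("", ",")` returns a one-element slice `[""]`, so an empty `SrcPrefix` in the init parameters would still enable the prefix check with an empty prefix. A minimal guard, assuming an empty `SrcPrefix` should keep the check disabled as before:

```go
// Only split when a prefix list was actually provided; strings.Split("", ",")
// yields [""] rather than an empty slice.
if ip.SrcPrefix != "" {
	srcPrefix = strings.Split(ip.SrcPrefix, ",")
}
```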