docs: fix typos

criyle 2024-02-03 15:19:49 +00:00
parent 505994205f
commit 8025bd36b6
11 changed files with 23 additions and 38 deletions

View File

@@ -306,7 +306,7 @@ Output:
 </details>
-Plese use PostMan or similar tools to send request to `http://localhost:5050/run`
+Please use PostMan or similar tools to send request to `http://localhost:5050/run`
 <details><summary>Single (this example require `apt install g++` inside the container)</summary>
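For readers who prefer code over PostMan, here is a minimal Go sketch of the same request against the default `http://localhost:5050/run` address. The body shape mirrors the `/run` examples elsewhere in this README (`cmd` with `args`, `env`, `files` and limits); treat the exact fields and values as illustrative and adjust them to the API version actually deployed.

```go
package main

import (
    "bytes"
    "fmt"
    "io"
    "net/http"
)

func main() {
    // Request body assembled after the /run examples in this README;
    // cpuLimit is in nanoseconds, memoryLimit in bytes.
    body := []byte(`{
        "cmd": [{
            "args": ["/bin/echo", "hello"],
            "env": ["PATH=/usr/bin:/bin"],
            "files": [
                {"content": ""},
                {"name": "stdout", "max": 10240},
                {"name": "stderr", "max": 10240}
            ],
            "cpuLimit": 10000000000,
            "memoryLimit": 104857600,
            "procLimit": 50
        }]
    }`)

    resp, err := http.Post("http://localhost:5050/run", "application/json", bytes.NewReader(body))
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    out, _ := io.ReadAll(resp.Body)
    fmt.Println(resp.Status, string(out))
}
```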
@@ -613,7 +613,7 @@ Sandbox:
 - for example, by default container 0 will run with 10001 uid & gid and container 1 will run with 10002 uid & gid...
 - `-enable-cpu-rate` enabled `cpu` cgroup to control cpu rate using cfs_quota & cfs_period control (Linux only)
 - `-cpu-cfs-period` specifies cfs_period if cpu rate is enabled (default 100ms) (valid value: \[1ms, 1s\])
-- `-seccomp-conf` specifies `seecomp` filter setting to load when running program (need build tag `seccomp`) (Linux only)
+- `-seccomp-conf` specifies `seccomp` filter setting to load when running program (need build tag `seccomp`) (Linux only)
 - for example, by `strace -c prog` to get all `syscall` needed and restrict to that sub set
 - however, the `syscall` count in one platform(e.g. x86_64) is not suitable for all platform, so this option is not recommended
 - the program killed by seccomp filter will have status `Dangerous Syscall`
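For a concrete sense of the cfs_quota & cfs_period control mentioned in the hunk above, here is a standalone sketch (not code from this repository): under cgroup CPU bandwidth control a group may run for cfs_quota out of every cfs_period, so the quota for a desired CPU rate is simply the rate multiplied by the period.

```go
package main

import (
    "fmt"
    "time"
)

// cfsQuota computes the cfs_quota for a desired CPU rate (in CPUs, e.g. 0.5
// or 2.0) under cgroup CPU bandwidth control: the group may run for quota
// out of every period.
func cfsQuota(cpuRate float64, period time.Duration) time.Duration {
    return time.Duration(cpuRate * float64(period))
}

func main() {
    period := 100 * time.Millisecond   // the documented -cpu-cfs-period default
    fmt.Println(cfsQuota(0.5, period)) // 50ms of CPU time per 100ms period
    fmt.Println(cfsQuota(2.0, period)) // 200ms: up to two full CPUs
}
```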
@@ -707,7 +707,7 @@ If a bind mount is specifying a target within the previous mounted one, please e
 #### Windows Security
 - Resources are limited by [JobObject](https://docs.microsoft.com/en-us/windows/win32/procthread/job-objects)
-- Privillege are limited by [Restricted Low Mandatory Level Token](https://docs.microsoft.com/en-us/windows/win32/secauthz/access-tokens)
+- Privilege are limited by [Restricted Low Mandatory Level Token](https://docs.microsoft.com/en-us/windows/win32/secauthz/access-tokens)
 - Low Mandatory Level directory is created for read / write
 ### MacOS Support

View File

@@ -18,7 +18,7 @@ import (
 )
 var (
-srvAddr = flag.String("srvaddr", "localhost:5051", "GRPC server addr")
+srvAddr = flag.String("srv-addr", "localhost:5051", "GRPC server addr")
 )
 const (

View File

@@ -32,7 +32,7 @@ type Config struct {
 OutputLimit *envexec.Size `flagUsage:"specifies POSIX rlimit for output for each command" default:"256m"`
 CopyOutLimit *envexec.Size `flagUsage:"specifies default file copy out max" default:"256m"`
 OpenFileLimit int `flagUsage:"specifies max open file count" default:"256"`
-Cpuset string `flagUsage:"control the usage of cpuset for all containerd process"`
+Cpuset string `flagUsage:"control the usage of cpuset for all container process"`
 EnableCPURate bool `flagUsage:"enable cpu cgroup rate control"`
 CPUCfsPeriod time.Duration `flagUsage:"set cpu.cfs_period" default:"100ms"`
 FileTimeout time.Duration `flagUsage:"specified timeout for filestore files"`
@@ -44,7 +44,7 @@ type Config struct {
 MonitorAddr string `flagUsage:"specifies the metrics binding address"`
 AuthToken string `flagUsage:"bearer token auth for REST / gRPC"`
 EnableDebug bool `flagUsage:"enable debug endpoint"`
-EnableMetrics bool `flagUsage:"enable promethus metrics endpoint"`
+EnableMetrics bool `flagUsage:"enable prometheus metrics endpoint"`
 // logger config
 Release bool `flagUsage:"release level of logs"`

View File

@@ -4,13 +4,10 @@ package main
 import (
 "context"
-crypto_rand "crypto/rand"
-"encoding/binary"
 "errors"
 "flag"
 "fmt"
 "log"
-math_rand "math/rand"
 "net/http"
 "net/http/pprof"
 "os"
@@ -53,13 +50,12 @@ var logger *zap.Logger
 func main() {
 conf := loadConf()
 if conf.Version {
-fmt.Print(version.Version)
+fmt.Println(version.Version)
 return
 }
 initLogger(conf)
 defer logger.Sync()
 logger.Sugar().Infof("config loaded: %+v", conf)
-initRand()
 warnIfNotLinux()
 // Init environment pool
@@ -69,7 +65,7 @@ func main() {
 prefork(envPool, conf.PreFork)
 work := newWorker(conf, envPool, fs)
 work.Start()
-logger.Sugar().Infof("Started worker with parallelism=%d, workdir=%s, timeLimitCheckInterval=%v",
+logger.Sugar().Infof("Started worker with parallelism=%d, workDir=%s, timeLimitCheckInterval=%v",
 conf.Parallelism, conf.Dir, conf.TimeLimitCheckerInterval)
 servers := []initFunc{
@@ -272,17 +268,6 @@ func initLogger(conf *config.Config) {
 }
 }
-func initRand() {
-var b [8]byte
-_, err := crypto_rand.Read(b[:])
-if err != nil {
-logger.Fatal("random generator init failed ", zap.Error(err))
-}
-sd := int64(binary.LittleEndian.Uint64(b[:]))
-logger.Sugar().Infof("random seed: %d", sd)
-math_rand.Seed(sd)
-}
 func prefork(envPool worker.EnvironmentPool, prefork int) {
 if prefork <= 0 {
 return
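The deleted initRand helper seeded the global math/rand source from crypto/rand at startup. A plausible reason for dropping it, not stated in the commit message, is that since Go 1.20 the top-level math/rand functions are seeded with a random value automatically and rand.Seed is deprecated, so no manual seeding is needed; a minimal sketch:

```go
package main

import (
    "fmt"
    "math/rand"
)

func main() {
    // Since Go 1.20 the global math/rand source is automatically seeded
    // with a random value, so different runs already produce different
    // sequences without any crypto/rand + binary decoding ceremony.
    fmt.Println(rand.Int63())
}
```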
@@ -469,7 +454,7 @@ func newEnvBuilder(conf *config.Config) (pool.EnvBuilder, map[string]any) {
 logger.Sugar().Fatal("create environment builder failed ", err)
 }
 if conf.EnableMetrics {
-b = &metriceEnvBuilder{b}
+b = &metricsEnvBuilder{b}
 }
 return b, param
 }

View File

@@ -49,7 +49,7 @@ var (
 Namespace: metricsNamespace,
 Subsystem: execSubsystem,
 Name: "memory_bytes",
-Help: "Histgram for the command execution max memory",
+Help: "Histogram for the command execution max memory",
 Buckets: memoryBucket,
 }, []string{"status"})
@@ -57,7 +57,7 @@ var (
 Namespace: metricsNamespace,
 Subsystem: filestoreSubsystem,
 Name: "size_bytes",
-Help: "Histgram for the file size created in the file store",
+Help: "Histogram for the file size created in the file store",
 Buckets: fileSizeBucket,
 })
@@ -171,13 +171,13 @@ func (m *metricsFileStore) Remove(id string) bool {
 return success
 }
-var _ pool.EnvBuilder = &metriceEnvBuilder{}
+var _ pool.EnvBuilder = &metricsEnvBuilder{}
-type metriceEnvBuilder struct {
+type metricsEnvBuilder struct {
 pool.EnvBuilder
 }
-func (b *metriceEnvBuilder) Build() (pool.Environment, error) {
+func (b *metricsEnvBuilder) Build() (pool.Environment, error) {
 e, err := b.EnvBuilder.Build()
 if err != nil {
 return nil, err

View File

@@ -30,7 +30,7 @@ type environ struct {
 cpuRate bool
 }
-// Destroy destories the environment
+// Destroy destroys the environment
 func (c *environ) Destroy() error {
 return c.Environment.Destroy()
 }
@@ -131,7 +131,7 @@ func (c *environ) Open(path string, flags int, perm os.FileMode) (*os.File, erro
 return f, nil
 }
-// MkdirAll equivelent to os.MkdirAll but in container
+// MkdirAll equivalent to os.MkdirAll but in container
 func (c *environ) MkdirAll(path string, perm os.FileMode) error {
 if path == "" || path == "." {
 return nil

View File

@@ -52,7 +52,7 @@ type Profile struct {
 Network bool
 }
-// DefaultProfile defines the minimun default profile to run programs
+// DefaultProfile defines the minimum default profile to run programs
 var DefaultProfile = Profile{
 ReadableDir: []string{"/usr/lib"},
 }

View File

@@ -27,14 +27,14 @@ func (p *process) Result() envexec.RunnerResult {
 }
 func (p *process) Usage() envexec.Usage {
-t, m, _ := getJobOjbectUsage(p.hJob)
+t, m, _ := getJobObjectUsage(p.hJob)
 return envexec.Usage{
 Time: t,
 Memory: m,
 }
 }
-func getJobOjbectUsage(hJob windows.Handle) (time.Duration, envexec.Size, error) {
+func getJobObjectUsage(hJob windows.Handle) (time.Duration, envexec.Size, error) {
 basicInfo := new(JOBOBJECT_BASIC_ACCOUNTING_INFORMATION)
 if _, err := QueryInformationJobObject(hJob, JobObjectBasicAccountingInformation,
 uintptr(unsafe.Pointer(basicInfo)), uint32(unsafe.Sizeof(*basicInfo)), nil); err != nil {

View File

@@ -53,7 +53,7 @@ type Cmd struct {
 CopyOut []CmdCopyOutFile
 CopyOutMax Size // file size limit
-// CopyOutDir specifies a dir to dump all /w contnet
+// CopyOutDir specifies a dir to dump all /w content
 CopyOutDir string
 // additional memory option

View File

@@ -34,7 +34,7 @@ type Pipe struct {
 // Name defines copy out entry name if it is not empty and proxy is enabled
 Name string
-// Limit defines maximun bytes copy out from proxy and proxy will still
+// Limit defines maximum bytes copy out from proxy and proxy will still
 // copy data after limit exceeded
 Limit Size

View File

@@ -14,7 +14,7 @@ var (
 _ heap.Interface = &Timeout{}
 )
-// Timeout is a file system with a maximun TTL
+// Timeout is a file system with a maximum TTL
 type Timeout struct {
 mu sync.Mutex
 FileStore
@@ -28,7 +28,7 @@ type timeoutFile struct {
 time time.Time
 }
-// NewTimeout creates a timeout file system with maximun TTL for a file
+// NewTimeout creates a timeout file system with maximum TTL for a file
 func NewTimeout(fs FileStore, timeout time.Duration, checkInterval time.Duration) FileStore {
 t := &Timeout{
 FileStore: fs,
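The `_ heap.Interface = &Timeout{}` assertion in the hunk above shows that Timeout keeps its entries in a heap, so TTL expiry can always look at the oldest file first. Below is a standalone sketch of that idea, not the repository's implementation; the `entry`, `ttlHeap`, and `expire` names are made up for illustration.

```go
package main

import (
    "container/heap"
    "fmt"
    "time"
)

// entry is a hypothetical stored file with its creation time.
type entry struct {
    id   string
    time time.Time
}

// ttlHeap is a min-heap ordered by creation time, so the oldest entry is
// always at the root and an expiry check only needs to inspect index 0.
type ttlHeap []entry

func (h ttlHeap) Len() int           { return len(h) }
func (h ttlHeap) Less(i, j int) bool { return h[i].time.Before(h[j].time) }
func (h ttlHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }
func (h *ttlHeap) Push(x any)        { *h = append(*h, x.(entry)) }
func (h *ttlHeap) Pop() any {
    old := *h
    n := len(old)
    x := old[n-1]
    *h = old[:n-1]
    return x
}

// expire removes every entry older than ttl and returns the evicted ids.
func expire(h *ttlHeap, ttl time.Duration, now time.Time) []string {
    var evicted []string
    for h.Len() > 0 && now.Sub((*h)[0].time) > ttl {
        evicted = append(evicted, heap.Pop(h).(entry).id)
    }
    return evicted
}

func main() {
    h := &ttlHeap{}
    heap.Init(h)
    now := time.Now()
    heap.Push(h, entry{"old", now.Add(-2 * time.Minute)})
    heap.Push(h, entry{"fresh", now})
    fmt.Println(expire(h, time.Minute, now)) // [old]
}
```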