Compare commits

...

262 Commits

Author SHA1 Message Date
criyle
a4e3da8fac chore(deps): upgrade dependencies
2025-10-29 00:06:42 +00:00
criyle
bf22d6f479 feat(main): ignoring SIGTERM when managed by PM2
- there is a race condition during shutdown: when go-judge receives SIGTERM from systemd, PM2 dumps a STOPPED status, so go-judge is not auto-restarted after a reboot
- PM2 uses SIGINT as its stop signal, so the stop signals sent by different process managers can be distinguished
- so when we detect that we are managed by PM2, we ignore the SIGTERM from systemd and wait for PM2's SIGINT to avoid the race condition

Unitech/pm2#6036
2025-10-28 16:27:13 +00:00
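A minimal sketch of the shutdown handling bf22d6f479 describes, assuming PM2 is detected through its environment variables (PM2_HOME / pm_id are illustrative, not necessarily what go-judge checks):

package sketch

import (
	"os"
	"os/signal"
	"syscall"
)

// waitForShutdown ignores systemd's SIGTERM when the process appears to
// be managed by PM2 and waits for PM2's SIGINT instead, avoiding the
// race in which PM2 dumps a STOPPED status during shutdown.
func waitForShutdown() os.Signal {
	sig := make(chan os.Signal, 1)
	if os.Getenv("pm_id") != "" || os.Getenv("PM2_HOME") != "" {
		signal.Ignore(syscall.SIGTERM)   // let PM2 drive the stop
		signal.Notify(sig, os.Interrupt) // PM2 stops with SIGINT
	} else {
		signal.Notify(sig, os.Interrupt, syscall.SIGTERM)
	}
	return <-sig
}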
Kris
565f302e52
doc: Synchronize README.cn.md content with English version (#169)
This PR comprehensively reviews and synchronizes the Chinese documentation (README.cn.md) with the English version (README.md) to ensure technical accuracy and consistency.
2025-10-28 09:58:50 -04:00
criyle
67e158fb8a ci(action): fix buildx
2025-10-07 02:07:03 +00:00
criyle
bdc2f19232 chore(deps): upgrade quic
quic-go/quic-go#5358
2025-10-07 02:02:34 +00:00
criyle
ad661b1ad2 feat(copy_out_truncate): add support for copy-out truncation 2025-10-06 22:41:03 +00:00
criyle
b613b1f747 ci(goreleaser): fix build on arm v5 2025-10-06 22:02:40 +00:00
criyle
81e22d4af1 ci(goreleaser): fix arm docker build 2025-10-06 21:37:32 +00:00
criyle
03ad17a4c6 ci(goreleaser): use docker_v2 2025-10-06 21:29:21 +00:00
criyle
0ef1b987fa chore(deps): upgrade pb reference 2025-10-06 20:50:01 +00:00
criyle
8bb802f7d6 feat(pb): add copyOutTruncate to allow truncation when the size limit is exceeded 2025-10-06 20:14:08 +00:00
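A hedged sketch of what the copyOutTruncate semantics amount to — copy up to the limit and report truncation instead of failing — not go-judge's actual copy-out implementation:

package sketch

import "io"

// copyOutTruncate copies at most limit bytes from r to w and reports
// whether the source held more data than the limit allowed.
func copyOutTruncate(w io.Writer, r io.Reader, limit int64) (written int64, truncated bool, err error) {
	written, err = io.Copy(w, io.LimitReader(r, limit))
	if err != nil {
		return written, false, err
	}
	var probe [1]byte
	if n, _ := r.Read(probe[:]); n > 0 {
		truncated = true // data remained beyond the limit
	}
	return written, truncated, nil
}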
criyle
b6e4b77232 chore(deps): upgrade to go1.25 and gin 1.11
2025-09-23 19:00:31 +00:00
criyle
876bb4b9aa fix(go-sandbox): preserve bind mount flags during readonly remounts
2025-08-23 20:26:52 +00:00
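A sketch of the general technique 876bb4b9aa names, not go-sandbox's actual code: a read-only remount of a bind mount has to carry over the nosuid/nodev/noexec flags already locked on that mount, or the kernel rejects the remount with EPERM:

package sketch

import "golang.org/x/sys/unix"

// remountReadonly remounts a bind-mounted target read-only while
// preserving the mount flags it already has.
func remountReadonly(target string) error {
	var st unix.Statfs_t
	if err := unix.Statfs(target, &st); err != nil {
		return err
	}
	flags := uintptr(unix.MS_REMOUNT | unix.MS_BIND | unix.MS_RDONLY)
	if st.Flags&unix.ST_NOSUID != 0 {
		flags |= unix.MS_NOSUID
	}
	if st.Flags&unix.ST_NODEV != 0 {
		flags |= unix.MS_NODEV
	}
	if st.Flags&unix.ST_NOEXEC != 0 {
		flags |= unix.MS_NOEXEC
	}
	return unix.Mount("", target, "", flags, "")
}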
criyle
25240a00c2 ci: build for riscv64 images
2025-08-18 01:15:57 +00:00
criyle
989e8dc651 chore(pb): update package version 2025-08-18 00:59:59 +00:00
criyle
95377a4fa6 chore(pb): migrate step 2 with opaque API 2025-08-18 00:50:33 +00:00
criyle
181b110a8b chore(deps): upgrade go-gin-prometheus with performance fix 2025-08-18 00:37:18 +00:00
criyle
21edeac11c chore(deps): downgrade go-gin-prometheus for performance
zsais/go-gin-prometheus#64
2025-08-14 19:20:47 +00:00
criyle
7e9bf4b84b ci: bump to go1.25
2025-08-14 03:51:22 +00:00
criyle
48a47dabb8 fix(build): upgrade pb 2025-08-14 03:19:46 +00:00
criyle
51423d7110 chore(pb): migrate step 1 with hybrid API 2025-08-14 03:06:33 +00:00
criyle
6582aaea4f chore(pb): upgrade dependencies 2025-08-13 23:45:54 +00:00
criyle
28fd2eaef3 chore(deps): upgrade dependencies for go1.25 2025-08-13 22:34:51 +00:00
criyle
2bc4a4fcee refactor(ffi): remove unused code
2025-08-05 21:56:13 -04:00
criyle
f57ca894cb ffi: add cpu rate config
#161
2025-07-27 14:45:29 -04:00
BoYanZh
3a849af692
fix: tmp fix for clock time < cpu time (#156) (#158)
2025-06-26 15:01:15 -04:00
BoYanZh
2b42c8bed5
fix: typo (#157)
* fix: typo

* fix: typo

* fix: typo

* refactor: if -> switch

* style: make gopls happy
2025-06-26 14:33:06 -04:00
BoYanZh
6211423165
feat: support -copy-in-dir for easier debugging (#155)
* feat: support -copy-in-dir for easier debugging

* fix: use relative path
2025-06-26 02:20:03 -04:00
criyle
6bf9f2e224 build(deps): upgrade go-seccomp-bpf to fix seccomp filter
elastic/go-seccomp-bpf#40
criyle/go-sandbox#8
2025-06-21 17:19:59 -04:00
BoYanZh
c5eaf846c3
feat: support -no-fallback cli flag (#151) (#152)
2025-06-09 17:03:02 -04:00
criyle
b15fce6ad7 build(deps): replace unmaintained yaml package
2025-05-29 23:52:16 +00:00
criyle
94f57a3553 refactor(envexec): abstract stream in and out
2025-05-29 19:14:05 +00:00
criyle
c5575ffe2b refactor(envexec): support file stream in & out directly
2025-05-29 04:40:53 -04:00
criyle
033790c1c0 refactor(env): split big function into smaller chunks
2025-05-28 22:17:41 -04:00
criyle
ac3b4183fd refactor(env): desugar zap logger 2025-05-28 19:58:48 -04:00
criyle
7f663ba0fd refactor(envexec): normalize error messages
2025-05-25 22:44:50 -04:00
criyle
86b85d8556 refactor(*): replace path with file path and normalize error messages 2025-05-25 22:19:42 -04:00
criyle
cf2e097a6b refactor(envexec): minor modification 2025-05-25 21:01:18 -04:00
criyle
8c03485866 goreleaser: run tests 2025-05-25 21:00:31 -04:00
criyle
d64158dc3a build(deps): update dependencies 2025-05-25 21:00:08 -04:00
criyle
a8679a52c5 refactor(*): desugar zap logger
2025-05-21 22:05:30 -04:00
DNEGEL3125
6d77d53e7f
test(rest_executor): add unit test for handleRun (#149)
* test(rest_executor): add unit test for handleRun

* test(mockWorker): improve Submit method by using buffered channel
2025-05-21 10:58:59 -04:00
DNEGEL3125
40e47b2bfa
refactor(rest_executor): rename handlers and clean up routing logic (#148)
* refactor: move `Register` interface to a new `register` package

* refactor(rest_executor): separate route registration logic

- rename struct: `handle` -> `cmdHandle`
- remove member variable `fileHandle` from `CmdHandle`
- rename function: `New` -> `NewCmdHandle`
- create function: `NewFileHandle`

* refactor(rest_executor): move `register.go` to `rest_executor/`
2025-05-18 14:46:08 -04:00
DNEGEL3125
f52326c3ce
Test file handler (#146)
* test: add unit test for filePost

* test: add unit test for fileGet

* test: add unit test for fileIDGet

* test: add unit test for fileIDDelete

* test: use `t.TempDir()` for temporary directories
2025-05-16 00:10:00 -04:00
dependabot[bot]
c762e0c967
build(deps): bump golang.org/x/net from 0.36.0 to 0.38.0 in /pb (#141)
2025-04-16 23:22:24 +00:00
criyle
1872249bb7 doc: simplify & add link to prometheus metrics endpoint
2025-04-06 01:40:54 +00:00
criyle
e6249239cf doc: add citation.cff 2025-04-06 01:06:36 +00:00
criyle
e7228aefbe metrics: fix build on other platforms
2025-04-03 21:39:01 -04:00
criyle
052a482697 metrics: add statistic from cgroup 2025-04-03 21:30:10 -04:00
criyle
31d6d497c4 build(deps): bump dependencies 2025-04-03 23:29:39 +00:00
criyle
cf7642eda3 main: fix enable metrics for file store 2025-04-03 21:57:40 +00:00
criyle
de01247a15 env: fix linux kernel version check
2025-03-22 22:21:02 -04:00
dependabot[bot]
33d873e927
build(deps): bump golang.org/x/net from 0.35.0 to 0.36.0 in /pb (#139)
2025-03-13 02:09:28 +00:00
criyle
6179925022 envexec: remove pipe file when copy in fails 2025-03-12 19:10:20 -04:00
criyle
20598d1768 filestore: do not create tmp dir, restore service after restart
2025-03-12 16:04:52 -04:00
criyle
083207df04 build(deps): go 1.24
2025-03-11 03:03:45 +00:00
criyle
94645000ff build: go 1.24
2025-03-11 02:53:36 +00:00
criyle
13188ff482 pb: use sub package
2025-03-02 21:20:32 +00:00
criyle
bc8083964f pb: publish subpackage individually, to avoid unnecessary dependency for clients 2025-03-02 21:15:29 +00:00
criyle
ff472b96f4 pb: add procPeak measurement 2025-03-02 20:37:19 +00:00
criyle
897d045302 pb: split messages according to best practice 2025-03-02 20:32:49 +00:00
criyle
31fe533f0e pb: init migration for protobuf edition 2023
2025-03-02 02:11:11 -05:00
criyle
6234433bcd doc: reduce length & move more to docs.goj.ac
2025-02-25 00:47:49 +00:00
criyle
dec0953ef3 doc: remove build
2025-02-23 00:52:02 -05:00
criyle
707297a66c doc: remove mount 2025-02-23 03:38:26 +00:00
criyle
567b7702d7 docs: reorganize README and add online documentation 2025-02-23 02:56:13 +00:00
criyle
3f2baa31de linuxc: update sandbox with vfork support for clone3(CLONE_INTO_CGROUP)
2025-02-22 21:38:20 +00:00
criyle
9ae8487a80 linuxc: fix cgroup fd & add cleanup for containers on exit
criyle/go-sandbox#13
2025-02-20 23:13:57 -05:00
criyle
8b9b6f33dc linuxc: add cached cgroup implementation 2025-02-21 00:46:42 +00:00
criyle
ffdf3e3925 linuxc: try to support the faster new clone3(CLONE_INTO_CGROUP) syscall
criyle/go-sandbox#13
2025-02-20 22:42:24 +00:00
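For context, Go itself exposes clone3(CLONE_INTO_CGROUP) through SysProcAttr since Go 1.20; a minimal sketch with an assumed cgroup path (go-sandbox's own implementation is separate and more involved):

package main

import (
	"log"
	"os/exec"
	"syscall"

	"golang.org/x/sys/unix"
)

func main() {
	// Open an O_PATH handle to the target cgroup directory (path assumed).
	fd, err := unix.Open("/sys/fs/cgroup/go-judge/run", unix.O_PATH|unix.O_CLOEXEC, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)

	cmd := exec.Command("/bin/true")
	// UseCgroupFD asks the runtime to start the child with
	// clone3(CLONE_INTO_CGROUP), placing it into the cgroup atomically.
	cmd.SysProcAttr = &syscall.SysProcAttr{UseCgroupFD: true, CgroupFD: fd}
	if err := cmd.Run(); err != nil {
		log.Fatal(err)
	}
}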
criyle
60a18591fc monitor: add stat for worker queue 2025-02-20 18:37:06 +00:00
criyle
94a613aca2 build(deps): update go-sandbox
2025-02-20 04:21:38 +00:00
criyle
efa629a7d5 feat(linux): add procPeak measurement for peak thread count in the container
Linux kernel >= 6.1 && cgroup v2 only
2025-02-20 03:36:52 +00:00
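The value comes from the cgroup v2 pids controller's pids.peak file, which exists only on Linux >= 6.1; a minimal reader sketch (the cgroup directory is an assumption, not go-judge's layout):

package sketch

import (
	"os"
	"path/filepath"
	"strconv"
	"strings"
)

// readProcPeak returns the peak number of tasks ever present in the
// cgroup, as recorded by the cgroup v2 pids controller.
func readProcPeak(cgroupDir string) (uint64, error) {
	b, err := os.ReadFile(filepath.Join(cgroupDir, "pids.peak"))
	if err != nil {
		return 0, err
	}
	return strconv.ParseUint(strings.TrimSpace(string(b)), 10, 64)
}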
criyle
9dc338063a build(deps): upgrade dependencies 2025-02-20 03:01:46 +00:00
criyle
49f091c9e8 env: fix nil pointer when no cgroup enabled 2025-01-27 20:50:49 +00:00
criyle
3218fcbab2 env: add cgroupControllers to /config and warn when certain cgroup controller is not enabled 2025-01-26 00:42:11 -05:00
criyle
cadf5f4b08 build(deps): deprecate go-grpc-prometheus 2025-01-25 22:56:34 -05:00
criyle
1d44e623fc pb: upgrade protobuf 2025-01-25 22:23:14 -05:00
criyle
3836841bbd build(ci): fix upload 2025-01-25 21:24:52 -05:00
criyle
f7a09a8661 build(ci): fix 2025-01-25 21:12:03 -05:00
criyle
08dd4d99f5 build(ci): upload individual artifacts 2025-01-25 21:10:55 -05:00
criyle
17878d722f build(ci): use goreleaser only 2025-01-25 20:25:19 -05:00
criyle
5ab4510877 build(ci): remove unnecessary actions 2025-01-25 19:35:43 -05:00
criyle
a77066dd5d build(ci): try fix goreleaser build cache 2025-01-25 19:24:04 -05:00
criyle
bf5df76168 build(ci): update goreleaser 2025-01-25 19:00:16 -05:00
criyle
6e65e2bd4f winc: fix setup time 2025-01-25 18:17:21 -05:00
criyle
58989da9a6 build(deps): update grpc middleware 2025-01-25 18:17:07 -05:00
criyle
5de715c576 build(deps): update packages 2025-01-25 16:12:28 -05:00
dependabot[bot]
c9c1117f89
build(deps): bump golang.org/x/net from 0.32.0 to 0.33.0 (#132)
Bumps [golang.org/x/net](https://github.com/golang/net) from 0.32.0 to 0.33.0.
- [Commits](https://github.com/golang/net/compare/v0.32.0...v0.33.0)

---
updated-dependencies:
- dependency-name: golang.org/x/net
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-01-10 13:41:30 -05:00
criyle
36528c7e4d build(deps): update dependencies 2024-12-13 18:26:05 +00:00
dependabot[bot]
0e52741b52
build(deps): bump golang.org/x/crypto from 0.26.0 to 0.31.0 (#124) 2024-12-13 17:22:25 +00:00
tobiichi3227
205ec8b053
doc: fix wrong directory path (#116) 2024-09-17 20:27:06 -04:00
criyle
fdf64f4899 goreleaser: deprecate snapshot.name_template 2024-08-20 20:56:56 +00:00
criyle
bcc3bcfbe2 build: go 1.23 2024-08-20 20:43:36 +00:00
criyle
12fca4b82b doc: deprecate centOS 7 2024-07-23 13:50:34 +00:00
dependabot[bot]
646f0241f0
build(deps): bump google.golang.org/grpc from 1.64.0 to 1.64.1 (#110) 2024-07-11 23:26:28 +00:00
criyle
d20feda117 linuxcontainer: allow read write permission on /proc fs
#106
2024-05-23 10:38:02 +00:00
criyle
a8d50dba48 build: remove dynamic library on macOS 2024-05-04 01:41:34 +00:00
criyle
ac732af3c9 build: try fix CGO_ENABLE on macOS 2024-05-04 01:38:31 +00:00
criyle
2c09373414 build(deps): update dependencies
fix #104
2024-05-04 01:31:27 +00:00
criyle
89958d4e00 doc: fix -container-cred-start 2024-04-16 12:05:09 +00:00
criyle
5a3ccca09b linuxcontainer: allow initCmd to be executed to initialize new container
fix #103
2024-04-16 11:14:17 +00:00
criyle
2ade4b9437 deps: remove deprecated functions and use rand/v2 2024-04-05 04:53:53 +00:00
dependabot[bot]
b4352b11e7
build(deps): bump google.golang.org/protobuf from 1.32.0 to 1.33.0 (#100) 2024-03-14 00:37:48 +00:00
criyle
f7327c0aa4 envexec: add eligibility check for pipeMapping
issue: #98
2024-03-08 15:16:30 +00:00
criyle
4c53a0c272 build: add grpcnotrace tag for smaller binary size 2024-03-04 07:47:31 +00:00
criyle
da3793123b grpc: add grpc msg size config and set default to 64m
fix #96
2024-03-04 07:20:30 +00:00
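Raising the limit maps onto the standard grpc-go server options; a sketch of what a 64 MiB default corresponds to (the option names are from google.golang.org/grpc, not go-judge's configuration keys):

package sketch

import "google.golang.org/grpc"

// newServer builds a gRPC server that accepts and sends messages up to
// 64 MiB instead of the 4 MiB receive default.
func newServer() *grpc.Server {
	const maxMsgSize = 64 << 20 // 64 MiB
	return grpc.NewServer(
		grpc.MaxRecvMsgSize(maxMsgSize),
		grpc.MaxSendMsgSize(maxMsgSize),
	)
}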
BoYanZh
b0c85a6481
grace: fix empty copyOut (#97)
* fix: remove empty copyOut

* reserve capacity for copyOut

---------

Co-authored-by: Yang Gao <6821729+criyle@users.noreply.github.com>
2024-03-04 15:12:53 +08:00
criyle
01863bc522 build: go 1.22 2024-02-21 02:15:29 +00:00
criyle
39b603575b doc: update interface 2024-02-06 12:57:36 +00:00
criyle
581b925450 stream: fix goroutine leak 2024-02-06 12:33:48 +00:00
criyle
aa41950f89 stream: add websocket transport layer support 2024-02-06 10:25:46 +00:00
criyle
a52f1360cd pb: use empty message for stream flag 2024-02-05 15:51:56 +00:00
criyle
9966f490f3 stream: use index and fd to indicate io streams rather than name 2024-02-05 13:12:04 +00:00
criyle
9fe356feb5 shell: decouple terminal with grpc via stream interface 2024-02-05 11:55:58 +00:00
criyle
8dd368a655 stream: rename types to avoid stutters 2024-02-05 08:57:12 +00:00
criyle
c29d0adce2 rest: no copy file download 2024-02-03 15:49:32 +00:00
criyle
8025bd36b6 docs: fix typos 2024-02-03 15:19:49 +00:00
criyle
505994205f github: remove cache as set up go did it 2024-02-03 13:02:26 +00:00
criyle
f716b7a682 github: upgrade action versions 2024-02-03 12:57:13 +00:00
criyle
eca512b06f github: update action versions 2024-02-03 12:49:58 +00:00
criyle
cb1256f4bf shell: support cancel with double ctrl-c 2024-02-03 12:39:29 +00:00
criyle
010c30f85e stream: decoupled stream execution with grpc stream 2024-02-03 11:56:57 +00:00
criyle
485aa02153 deps: bump versions 2024-02-03 07:34:25 +00:00
dependabot[bot]
3496e24ee4
build(deps): bump golang.org/x/crypto from 0.15.0 to 0.17.0 (#92) 2023-12-19 03:01:08 +00:00
criyle
f25d768f65 doc: update naming convention 2023-11-10 01:52:59 +00:00
criyle
473d620996 env: avoid duplicated error and update gitignore 2023-11-10 01:46:32 +00:00
criyle
a8c49e95c4 github: fix build 2023-11-10 01:30:12 +00:00
criyle
4220c29481 *: rename executorserver to go-judge 2023-11-10 01:20:28 +00:00
criyle
4de8c98bc2 linuxc: create transient unit cgroup on systemd enabled distribution via dbus
fixes #89
2023-11-09 10:57:42 +00:00
criyle
2f2478539c model: deprecate strict_memory_limit in favor of a more precise definition 2023-10-30 04:04:42 +00:00
criyle
eded6e02f6 linuxc: add addressSpaceLimit to enable rlimit_as 2023-10-30 03:10:28 +00:00
dependabot[bot]
1117e6eed7
build(deps): bump google.golang.org/grpc from 1.58.2 to 1.58.3 (#88) 2023-10-26 01:01:24 +00:00
dependabot[bot]
4b6c7ac52b
build(deps): bump golang.org/x/net from 0.15.0 to 0.17.0 (#87)
Bumps [golang.org/x/net](https://github.com/golang/net) from 0.15.0 to 0.17.0.
- [Commits](https://github.com/golang/net/compare/v0.15.0...v0.17.0)

---
updated-dependencies:
- dependency-name: golang.org/x/net
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-10-12 22:41:38 +08:00
criyle
eecd8bd6f2 deps: upgrade to go1.21 with dependencies 2023-09-27 13:50:23 +00:00
criyle
fb90a1ece3 winc: minor fix and fixed some typos
close #84
2023-09-25 14:33:40 +00:00
criyle
faec026b31 *: update dependency versions 2023-08-11 08:12:42 +00:00
SourceRoc
04804ed605
winc: fixed the error in the regular file check (#78)
Co-authored-by: Source_Roc <sourceroc_fsf@163.com>

fixed #74
2023-08-11 15:55:47 +08:00
criyle
9e403cb2d2 doc: reorder paragraphs & update dependencies 2023-07-21 09:07:42 +00:00
criyle
185c73bed5 ci: supporting riscv64 2023-07-21 07:31:38 +00:00
criyle
5d037c73ef build(deps): bump all dependencies to latest 2023-06-11 08:41:49 +00:00
criyle
8b35d65755 build(deps): bump all dependencies to latest 2023-06-11 08:37:33 +00:00
dependabot[bot]
47ca7445f2
build(deps): bump github.com/gin-gonic/gin from 1.9.0 to 1.9.1 (#67)
Bumps [github.com/gin-gonic/gin](https://github.com/gin-gonic/gin) from 1.9.0 to 1.9.1.
- [Release notes](https://github.com/gin-gonic/gin/releases)
- [Changelog](https://github.com/gin-gonic/gin/blob/master/CHANGELOG.md)
- [Commits](https://github.com/gin-gonic/gin/compare/v1.9.0...v1.9.1)

---
updated-dependencies:
- dependency-name: github.com/gin-gonic/gin
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-06-11 16:12:06 +08:00
ArArgon
80cadb3be4
fix: update exit status when status != 0 (#68) 2023-06-11 16:10:52 +08:00
criyle
90de4c544c main: allow -src-prefix to accept a comma-separated list of paths 2023-04-14 03:46:30 -07:00
criyle
afb21cc2f7 main: listen on both ipv4 and ipv6 localhost when exists 2023-04-14 02:51:04 -07:00
criyle
495ad3f4ba config: use localhost when not in container 2023-04-13 18:22:24 -07:00
criyle
dd8c6ec2d0 *: bump dependencies & fix multiple cmd pipe proxy copy out 2023-04-13 02:47:58 -07:00
criyle
a23ea1c625 github: update go version to 1.20 2023-02-19 02:04:46 -08:00
criyle
348abdaf53 api: add /config endpoint for configuration and update go1.20 2023-02-19 02:02:02 -08:00
criyle
330573eceb envexec: open collecting files before exec to avoid error when deleted 2022-12-26 13:13:29 -08:00
criyle
c7d4babb64 config: set default value for copyOutMax to 256m 2022-12-02 21:56:39 -08:00
criyle
32870c4516 ci: update build.yml 2022-11-13 15:29:27 -08:00
criyle
a06fc0b259 env: support symlink creation 2022-11-13 15:23:29 -08:00
yzy-1
561c0a6955
ffi: refactor cinterface (#43)
* ffi: refactor cinterface

* ffi: change Init parameters to JSON
2022-10-25 02:16:57 -04:00
criyle
e5c7b811e5 github: upgrade actions 2022-10-20 21:32:32 -07:00
criyle
3531ea67d0 github: update actions 2022-10-20 21:11:06 -07:00
criyle
a10fd6892e main: log error when listening failed 2022-10-20 20:35:47 -07:00
criyle
8bd65f1e9c model: fix file leak for gRPC and websocket handler
fix #40
2022-10-18 20:55:16 -07:00
criyle
5b5924608c linuxcontainer: build on go1.19
log info for ws error

fix #38
fix #39
2022-10-01 19:08:08 -07:00
criyle
f2f8631cb8 linuxcontainer: fix nested mount configuration 2022-08-27 00:37:48 -07:00
criyle
ef85788a6a linuxcontainer: load default environment variable from /.env 2022-08-25 20:41:07 -07:00
criyle
9de5338137 linuxc: fix exec failure message 2022-08-13 01:34:17 -07:00
criyle
1cd461b1c5 linuxc: look up executable inside container using PATH environment variable 2022-08-13 01:24:18 -07:00
criyle
3734a92c4b linuxcontainer: ignore signals and unresponsive containers 2022-08-09 21:29:35 -07:00
criyle
b248dd1747 grpc: wrap error with status code
resolves #37
2022-07-30 00:56:59 -07:00
criyle
80633d0acd linuxcontainer: use temp directory as root 2022-07-15 23:57:20 -07:00
criyle
0d5148f703 build: fix docker platform 2022-05-23 15:28:19 -07:00
criyle
1bba5723d2 build: fix dependency 2022-05-23 14:48:57 -07:00
criyle
ce07b7079a build: use goreleaser 2022-05-23 14:47:50 -07:00
汪心禾 Wang, Xinhe
77abb8211f
Fix typo (#35)
- go 语音 ("go speech" — the typo)
+ go 语言 ("go language")
2022-05-14 00:34:10 -04:00
criyle
f84badc067 *: bump dependency versions 2022-04-22 15:57:33 -07:00
criyle
4b6ab1923c mac: fix Xcode clang compile 2022-04-03 03:49:48 -07:00
criyle
aeafe49334 mac: ignore memory limit 2022-04-03 03:09:20 -07:00
criyle
3e2c914136 *: drop macOS support 2022-03-20 23:43:44 -07:00
criyle
105980bb73 *: bump to go1.18 2022-03-20 23:40:21 -07:00
criyle
fcb56ae242 container: fix build by using unix package 2022-03-13 11:18:56 +00:00
criyle
227f398662 container: add support to copy into sub-directory 2022-03-13 11:13:33 +00:00
criyle
a4334d00d6 linuxcontainer: fix cgroup v1 cpuRateLimit 2022-03-01 00:06:44 -08:00
criyle
a461f88d64 main: move metrics / debug to another endpoint 2022-02-21 23:28:55 -08:00
criyle
d00db1c7f7 metrics: adjust some metrics names 2022-02-20 01:34:43 -08:00
criyle
3fbef94e0f version: fix version generation for git action 2022-02-12 09:39:33 +00:00
criyle
00fba25769 version: fix git action and docker version generation 2022-02-12 09:29:55 +00:00
criyle
414efbf597 envexec: performance improvements for content copyIn 2022-02-12 09:05:10 +00:00
criyle
c233f64485 linuxcontainer: fix cgroup v2 memory usage 2022-02-12 08:19:13 +00:00
criyle
31533cb73d filestore: performance boost 2022-02-01 13:00:11 -08:00
criyle
43f1e0dab2 envexec: reduce goroutine usage
doc: update cgroup v2
2022-01-31 09:42:23 +00:00
criyle
5fd53e07ca filestore: use math/rand & reduce fileId to 40bit 2022-01-08 19:25:23 -08:00
criyle
dd4c6ee994 cgroup: add cgroup v2 support in containers 2021-12-25 23:28:13 +00:00
criyle
359fda504e cgroup: add support of cgroup v2 2021-12-24 23:02:17 -08:00
criyle
96ad6f979b container: add ability to mask path 2021-11-28 23:26:57 -08:00
criyle
5a79fa24f7 *: bump dependency versions 2021-11-19 15:09:43 -08:00
criyle
dbcfc79614 worker: add ability to cancel task when queue is full 2021-11-19 14:34:00 -08:00
criyle
6643a592ba doc: update README 2021-11-14 19:52:51 -08:00
criyle
1c3681ba39 ws: add ability to cancel running task
- the client can cancel a running task by closing the connection or sending a cancel request

close #24
2021-11-14 19:17:07 -08:00
criyle
049d26a179 linuxc: limit open file count & add stdio devices 2021-11-12 01:35:47 -08:00
criyle
d8581a0c0c linuxcontainer: allow restrict usage of CPU and CPU set
- change cpuRate to int, where 1000 means 100% of one CPU
2021-10-30 23:25:30 -07:00
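With 1000 defined as one full CPU, the rate converts directly into a cgroup v2 cpu.max quota/period pair; a hedged sketch rather than go-judge's actual cgroup code:

package sketch

import "fmt"

// cpuMax renders cpuRate (1000 means 100% of one CPU) as the value
// written to cgroup v2 cpu.max, using the conventional 100ms period.
func cpuMax(cpuRate uint64) string {
	const periodUs = 100000
	quotaUs := periodUs * cpuRate / 1000
	return fmt.Sprintf("%d %d", quotaUs, periodUs)
}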
criyle
7f0b066258 linuxcontainer: do not change uid/gid by default 2021-09-27 23:57:46 -07:00
criyle
34e87660d8 fix build 2021-09-20 19:09:04 -07:00
criyle
1326a38b25 model: fix gRPC segv caused by mmap before encoding
- fix mount proc argument
2021-09-20 19:00:03 -07:00
criyle
915cbaccbb envexec: add detailed file error message 2021-09-20 00:43:05 -07:00
criyle
b54becc32d envexec: collect output through a file in container
- add `pipe` boolean to collector to indicate whether to use pipe or file as collector

close #21
2021-09-19 22:05:24 -07:00
criyle
7e5060b349 gomod: fix replace 2021-09-19 15:48:45 -07:00
criyle
78bb4d85de filestore: use /dev/shm instead of memory as file storage
- increase default tmpfs size to 128M
- add /config to get file store path
- remove memory only file store

close #20
2021-09-19 15:47:24 -07:00
criyle
9414057b8b build: fix darwin/arm64 2021-08-18 22:47:27 -07:00
criyle
841618d13a build: macos does not build on go 1.17, keep 1.16 2021-08-18 22:42:23 -07:00
criyle
841bc95e61 *: upgrade to go 1.17 2021-08-18 22:13:13 -07:00
Yang Gao
323b21a982
Merge pull request #16 from undefined-moe/patch-2
typo
2021-08-17 10:35:12 -07:00
undefined
dfa3296507
typo 2021-08-18 00:55:19 +08:00
undefined
837160b034
typo 2021-08-18 00:54:07 +08:00
criyle
24deb3aeac linuxcontainer: add CAP_SYS_RESOURCE 2021-08-10 00:00:37 -07:00
Yang Gao
5a1e9cfa05
Merge pull request #15 from undefined-moe/patch-1 2021-08-09 12:13:17 -07:00
undefined
49078dadc8
typo 2021-08-10 03:12:03 +08:00
criyle
176a61e96e sandbox: add error location & ignore unshare cgroup error 2021-08-04 22:46:47 -07:00
criyle
d6042729e5 *: bump dependencies with go1.16.6 2021-07-15 19:03:05 -07:00
criyle
55ffb52086 main: reduce memory allocation & force GC regularly 2021-06-24 20:27:43 -07:00
criyle
c34a83b617 file: get /file contains original name 2021-06-22 01:15:01 -07:00
criyle
af6605ad33 readme: add missing command line arguments 2021-06-12 19:28:23 -07:00
criyle
e99bcce3db version: add pipeProxy feature flag & update dependencies 2021-06-12 17:45:01 -07:00
criyle
0d09e88184 envexec: Add pipe proxy support 2021-06-12 17:33:53 -07:00
criyle
6195204dcc envexec: add optional flag for copyOut files
fix #14
2021-06-05 20:24:08 -07:00
criyle
46ec099685 linuxcontainer: bump dependency versions go1.16.5 2021-06-05 17:07:27 -07:00
criyle
9dba0d0b09 doc: update README 2021-05-08 14:55:51 -07:00
criyle
64ba821390 metrics: add metrics for environment counts 2021-05-01 21:01:47 -07:00
criyle
4cd2ed2bde mod: bump dependency versions 2021-04-25 18:03:47 -07:00
Yang Gao
6003a70557
Merge pull request #13 from yzy-1/master
feat(*): converting from string to model.Status
2021-04-25 14:03:29 -07:00
leafor
1bc2df59ea feat(*): converting from string to model.Status 2021-04-25 19:28:46 +08:00
criyle
6a9a2c6a3b metrics: fix size map 2021-04-17 18:17:41 -07:00
criyle
ba8d3924d8 feat: add timeout support to filestore
resolve #11
2021-04-17 18:13:18 -07:00
criyle
222eb97b25 docs: add Chinese documentation (加入中文文档) 2021-04-06 02:03:19 -07:00
criyle
e2bddd3216 linuxcontainer: fix fd leak 2021-03-30 21:41:48 -07:00
criyle
eda1962b0c linuxcontainer: host async wait 2021-03-29 22:52:02 -07:00
criyle
d799f614f2 linuxcontainer: use async executor 2021-03-29 01:25:10 -07:00
criyle
f624a0f6a8 linuxcontainer: bump dependency versions 2021-03-27 19:27:43 -07:00
criyle
5acb22fab6 linuxcontainer: report error if cgroup set failed 2021-03-21 18:53:15 -07:00
criyle
1b21b745d8 build: try parallelize docker build 2021-03-19 19:10:31 -07:00
criyle
6ab5bb9ec0 build: try support multiple arch 2021-03-19 18:53:48 -07:00
criyle
fb58a761d3 build: try to support multiple arch 2021-03-19 18:49:14 -07:00
criyle
9dec36d395 filestore: refactor unique id generator 2021-03-19 00:28:11 -07:00
criyle
d2fcb923c9 envexec: fix build on macOS 2021-03-13 16:13:25 -08:00
criyle
2acca7d71c envexec: refactor to not use empty interface 2021-03-13 16:08:51 -08:00
Yang Gao
ba9ddc610a
Merge pull request #10 from zx2c4-forks/xsyswindows-change
winc: use uintptr variable for key parameter of GetQueuedCompletionStatus
2021-02-24 18:28:04 -08:00
Jason A. Donenfeld
5ce2718fe2 winc: use uintptr variable for key parameter of GetQueuedCompletionStatus
As of https://github.com/golang/sys/commit/683adc9d29d7 this function
now takes a uintptr, in order to avoid a buffer overflow.
2021-02-25 02:44:36 +01:00
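After that x/sys change the completion key has to be read into a uintptr; a minimal call sketch against golang.org/x/sys/windows (variable names are illustrative):

//go:build windows

package sketch

import "golang.org/x/sys/windows"

// waitCompletion blocks on an I/O completion port and returns the
// completion key, which is now a uintptr rather than a uint32.
func waitCompletion(iocp windows.Handle) (uintptr, error) {
	var (
		bytes uint32
		key   uintptr
		ov    *windows.Overlapped
	)
	err := windows.GetQueuedCompletionStatus(iocp, &bytes, &key, &ov, windows.INFINITE)
	return key, err
}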
criyle
4267ddee3c feat(*): deprecate ioutil & fix kernel version check 2021-02-24 00:16:35 -08:00
criyle
20c0ae0603 feat(*): bump to go 1.16 2021-02-21 19:47:53 -08:00
criyle
8d3bf1bf62 feat(sandbox): add strict memory limit
- remove rlimit data when cgroup enabled
- add `strictMemoryLimit` to request parameter to re-enable rlimit data limit
2021-02-06 12:46:39 -08:00
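A sketch of the policy this commit describes — rely on the cgroup memory controller when it is enabled and only set RLIMIT_DATA when the request opts into strictMemoryLimit — not the actual go-judge code:

package sketch

import "golang.org/x/sys/unix"

// applyMemoryLimit enforces the memory limit via RLIMIT_DATA only when
// no cgroup is available or the request asked for strictMemoryLimit;
// otherwise the cgroup memory controller (set up elsewhere) bounds it.
func applyMemoryLimit(limitBytes uint64, cgroupEnabled, strictMemoryLimit bool) error {
	if cgroupEnabled && !strictMemoryLimit {
		return nil
	}
	rl := unix.Rlimit{Cur: limitBytes, Max: limitBytes}
	return unix.Setrlimit(unix.RLIMIT_DATA, &rl)
}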
criyle
2213b5b092 feat(env): compatibility with kernel >= 3.10 2021-01-21 00:44:01 -08:00
criyle
7ca25c8659 bump dependency versions 2021-01-09 11:25:12 -08:00
criyle
a0cc4acbe8 Reduce build size 2021-01-09 00:38:46 -08:00
criyle
0db852c242 feat(worker): add default file copy out max 2021-01-09 00:22:51 -08:00
criyle
8821125e9e security(envexec): add check for regular file when copy out 2021-01-02 12:45:05 -08:00
criyle
d48341b23c Docker build 2020-12-30 15:04:39 -08:00
criyle
22fe2b3a99 Docker version 2020-12-30 13:39:59 -08:00
criyle
f5a84cd04a Action docker 2020-12-30 13:23:19 -08:00
criyle
6055f165b5 Update naming: realCpuLimit -> clockLimit 2020-12-30 13:01:03 -08:00
criyle
d66889f475 Update metrics & fix grpc error 2020-12-28 16:48:40 -08:00
criyle
2df163126c Refactor executor server to multiple modules 2020-12-27 16:45:48 -08:00
criyle
8e096ed14b Update gRPC package versions 2020-12-26 19:43:56 -08:00
criyle
f5dff9ca57 Refactor package structure & debug outputs 2020-12-25 17:25:09 -08:00
criyle
65630fbbcf Fix potential cgroup null pointer 2020-12-17 22:57:43 -08:00
criyle
4909468c48 Fix container reset (affecting v0.8.1 - v0.9.2) 2020-12-17 22:31:05 -08:00
criyle
04cfb36722 Add support to load a seccomp filter 2020-12-13 21:08:52 -08:00
156 changed files with 12638 additions and 7725 deletions


@@ -1,7 +0,0 @@
-root = "."
-[build]
-cmd = "go build -o ./tmp/executorserver ./cmd/executorserver"
-bin = "tmp/executorserver"
-include_ext = ["go"]


@@ -1,7 +1,8 @@
 root = "."
 [build]
-cmd = "go build -o ./tmp/executorserver ./cmd/executorserver"
+cmd = "go build -o ./tmp/go-judge ./cmd/go-judge"
-bin = "tmp/executorserver"
+full_bin = "tmp/go-judge -enable-grpc -enable-debug -enable-metrics -http-addr=:5050 -grpc-addr=:5051"
 include_ext = ["go"]
+exclude_dir = ["dist", "node_modules"]


@@ -1,33 +0,0 @@
-# Binaries for programs and plugins
-*.exe
-*.exe~
-*.dll
-*.so
-*.dylib
-/judge
-# Test binary, build with `go test -c`
-*.test
-# Output of the go coverage tool, specifically when used with LiteIDE
-*.out
-# OS
-.DS_Store
-# Test Env
-env*.sh
-init.sql
-# Documents
-LICENSE
-README.md
-node_modules
-/cinit
-# not release yet
-package*.json
-example.js
-executor_server.h


@@ -3,181 +3,80 @@ on:
   push:
     branches: [master]
     tags: [v*]
+permissions:
+  contents: write
 jobs:
-  create-release:
-    name: Create release
+  goreleaser:
+    name: Goreleaser
     runs-on: ubuntu-latest
-    outputs:
-      upload_url: ${{ steps.create_release.outputs.upload_url }}
     steps:
-      - run: echo Release ${{ github.ref }}
-      - name: Create Release
+      - name: Checkout
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+      - name: Login to DockerHub
+        uses: docker/login-action@v3
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+      - name: Set up Go
+        uses: actions/setup-go@v5
+        with:
+          go-version: "1.25"
+          cache-dependency-path: |
+            go.sum
+      - name: Remove unsupported tags
+        run: git tag -d $(git tag -l "pb/*")
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+      - name: Run GoReleaser
+        uses: goreleaser/goreleaser-action@v6
         if: ${{ contains(github.ref, 'v') }}
-        id: create_release
-        uses: actions/create-release@v1
+        with:
+          # either 'goreleaser' (default) or 'goreleaser-pro'
+          distribution: goreleaser
+          version: latest
+          args: release --clean
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+      - name: Test GoReleaser
+        uses: goreleaser/goreleaser-action@v6
+        if: ${{ ! contains(github.ref, 'v') }}
         with:
-          tag_name: ${{ github.ref }}
-          release_name: Release ${{ github.ref }}
-          draft: false
-          prerelease: false
-  build:
-    name: Build-${{ matrix.os }}
-    needs: create-release
-    runs-on: ${{ matrix.os }}
+          # either 'goreleaser' (default) or 'goreleaser-pro'
+          distribution: goreleaser
+          version: latest
+          args: release --snapshot --clean
+      - name: Upload assets
+        uses: actions/upload-artifact@v4
+        with:
+          name: go-judge
+          path: dist/
+  upload-artifacts:
+    name: Upload artifacts-${{ matrix.os }}-${{ matrix.arch }}
+    runs-on: ubuntu-latest
+    needs: goreleaser
     strategy:
       matrix:
         os:
-          - windows-latest
-          - ubuntu-latest
-          - macos-latest
-        GOARCH:
-          - amd64
+          - windows
+          - linux
+          - darwin
+        arch:
+          - amd64_v3
+          - arm64_v8.0
     steps:
-      - name: Set up Go 1.14
-        uses: actions/setup-go@v2
-        with:
-          go-version: 1.14
-      - name: Check out
-        uses: actions/checkout@v2
-      - name: Get git tag ref
-        run: git fetch --prune --unshallow --tags
-      - name: Restore Cache
-        uses: actions/cache@v2
-        with:
-          path: ~/go/pkg/mod
-          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
-          restore-keys: |
-            ${{ runner.os }}-go-
-      - name: Download dependencies
-        run: go mod download
-      - name: Build on Linux
-        if: ${{ matrix.os == 'ubuntu-latest' }}
-        env:
-          GOARCH: ${{ matrix.GOARCH }}
-        run: |
-          go generate ./cmd/executorserver
-          go build -o executorserver ./cmd/executorserver
-          go build -o executorshell ./cmd/executorshell
-      - name: Build shared objects on Linux
-        if: ${{ matrix.os == 'ubuntu-latest' }}
-        env:
-          GOARCH: ${{ matrix.GOARCH }}
-        run: |
-          go build -o cinit ./cmd/cinit
-          go build -buildmode=c-shared -o executorserver.so ./cmd/ffi
-      - name: Upload executorserver on linux
-        if: ${{ matrix.os == 'ubuntu-latest' }}
-        uses: actions/upload-artifact@v2
-        with:
-          name: ExecutorServer-${{ matrix.GOARCH }}
-          path: executorserver
+      - uses: actions/download-artifact@v4
+        with:
+          name: go-judge
+          path: dist
+      - name: Upload assets go-judge
+        uses: actions/upload-artifact@v4
+        with:
+          name: go-judge_${{ matrix.os == 'darwin' && 'macos' || matrix.os }}_${{ matrix.arch }}
+          path: dist/go-judge_${{ matrix.os }}_${{ matrix.arch }}/go-judge${{ matrix.os == 'windows' && '.exe' || ''}}
+      - name: Upload assets go-judge-shell
+        uses: actions/upload-artifact@v4
+        with:
+          name: go-judge-shell_${{ matrix.os == 'darwin' && 'macos' || matrix.os }}_${{ matrix.arch }}
+          path: dist/go-judge-shell_${{ matrix.os }}_${{ matrix.arch }}/go-judge-shell${{ matrix.os == 'windows' && '.exe' || ''}}
-      - name: Upload executorshell on linux
-        if: ${{ matrix.os == 'ubuntu-latest' }}
-        uses: actions/upload-artifact@v2
-        with:
-          name: ExecutorShell-${{ matrix.GOARCH }}
-          path: executorshell
-      - name: Upload cinit on linux
-        if: ${{ matrix.os == 'ubuntu-latest' }}
-        uses: actions/upload-artifact@v2
-        with:
-          name: cinit-${{ matrix.GOARCH }}
-          path: cinit
-      - name: Upload executorserver.so on linux
-        if: ${{ matrix.os == 'ubuntu-latest' }}
-        uses: actions/upload-artifact@v2
with:
name: ExecutorServer-${{ matrix.GOARCH }}.so
path: executorserver.so
- name: Upload assets for linux
if: ${{ matrix.os == 'ubuntu-latest' && contains(github.ref, 'v') }}
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ needs.create-release.outputs.upload_url }}
asset_path: executorserver
asset_name: executorserver-${{ matrix.GOARCH }}
asset_content_type: application/octet-stream
- name: Build on Windows
if: ${{ matrix.os == 'windows-latest' }}
env:
GOARCH: ${{ matrix.GOARCH }}
run: |
go generate ./cmd/executorserver
go build -o executorserver.exe ./cmd/executorserver
- name: Build shared object on Windows
if: ${{ matrix.os == 'windows-latest' }}
env:
GOARCH: ${{ matrix.GOARCH }}
run: |
go build -buildmode=c-shared -o executorserver.dll ./cmd/ffi
- name: Upload executorserver.exe on Windows
if: ${{ matrix.os == 'windows-latest' }}
uses: actions/upload-artifact@v2
with:
name: ExecutorServer-${{ matrix.GOARCH }}.exe
path: executorserver.exe
- name: Upload executorserver.dll on Windows
if: ${{ matrix.os == 'windows-latest' }}
uses: actions/upload-artifact@v2
with:
name: ExecutorServer-${{ matrix.GOARCH }}.dll
path: executorserver.dll
- name: Upload assets for windows
if: ${{ matrix.os == 'windows-latest' && contains(github.ref, 'v') }}
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ needs.create-release.outputs.upload_url }}
asset_path: executorserver.exe
asset_name: executorserver-${{ matrix.GOARCH }}.exe
asset_content_type: application/octet-stream
- name: Build on macOS
if: ${{ matrix.os == 'macos-latest' }}
env:
GOARCH: ${{ matrix.GOARCH }}
run: |
go generate ./cmd/executorserver
go build -o executorserver ./cmd/executorserver
go build -o executorshell ./cmd/executorshell
- name: Build shared object on macOS
if: ${{ matrix.os == 'macos-latest' }}
env:
GOARCH: ${{ matrix.GOARCH }}
run: |
go build -buildmode=c-shared -o executorserver.dylib ./cmd/ffi
- name: Upload executorserver on macOS
if: ${{ matrix.os == 'macos-latest' }}
uses: actions/upload-artifact@v2
with:
name: ExecutorServer-MacOS-${{ matrix.GOARCH }}
path: executorserver
- name: Upload executorshell on macOS
if: ${{ matrix.os == 'macos-latest' }}
uses: actions/upload-artifact@v2
with:
name: ExecutorShell-MacOS-${{ matrix.GOARCH }}
path: executorshell
- name: Upload executorserver.dylib on macOS
if: ${{ matrix.os == 'macos-latest' }}
uses: actions/upload-artifact@v2
with:
name: ExecutorServer-${{ matrix.GOARCH }}.dylib
path: executorserver.dylib
- name: Upload assets for macOS
if: ${{ matrix.os == 'macos-latest' && contains(github.ref, 'v') }}
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ needs.create-release.outputs.upload_url }}
asset_path: executorserver
asset_name: executorserver-macOS-${{ matrix.GOARCH }}
asset_content_type: application/octet-stream

13
.gitignore vendored

@ -28,10 +28,11 @@ node_modules
 package*.json
 example.js
-executor_server.h
-/executor_server
-/executorproxy
-/executorshell
-/executorserver
-/cinit
+go-judge.h
+/go-judge*
 /tmp
+cmd/go-judge/version/version.txt
+dist/
+.vscode

168
.goreleaser.yaml Normal file

@ -0,0 +1,168 @@
# This is an example .goreleaser.yml file with some sensible defaults.
# Make sure to check the documentation at https://goreleaser.com
project_name: go-judge
version: 2
before:
hooks:
# You may remove this if you don't use go modules.
- go mod tidy
- go mod download
# you may remove this if you don't need go generate
- go generate ./cmd/...
- go test ./... -v
builds:
- main: ./cmd/go-judge
binary: go-judge
env:
- CGO_ENABLED=0
goos:
- linux
- windows
- darwin
goarch:
- "386"
- amd64
- arm
- arm64
- riscv64
- loong64
- mips64le
- ppc64le
- s390x
goarm:
- "5"
- "7"
goarm64:
- "v8.0"
goamd64:
- v2
- v3
ignore:
- goos: windows
goarch: arm
tags:
- nomsgpack
- grpcnotrace
- main: ./cmd/go-judge-shell
binary: go-judge-shell
id: go-judge-shell
env:
- CGO_ENABLED=0
goos:
- linux
- windows
- darwin
goarch:
- "386"
- amd64
- arm
- arm64
- riscv64
- loong64
- mips64le
- ppc64le
- s390x
goarm:
- "5"
- "7"
goarm64:
- "v8.0"
goamd64:
- v2
- v3
ignore:
- goos: windows
goarch: arm
tags:
- nomsgpack
- grpcnotrace
# - main: ./cmd/go-judge-ffi
# binary: go-judge-ffi
# id: go-judge-ffi
# env:
# - CGO_ENABLED=1
# buildmode: c-shared
# goos:
# - linux
# goarch:
# - amd64
# goamd64:
# - v2
- main: ./cmd/go-judge-init
binary: go-judge-init
id: go-judge-init
env:
- CGO_ENABLED=0
goos:
- linux
goarch:
- "386"
- amd64
- arm
- arm64
- riscv64
- loong64
- mips64le
- ppc64le
- s390x
goarm:
- "5"
- "7"
goarm64:
- "v8.0"
goamd64:
- v2
- v3
archives:
- files:
- mount.yaml
formats: [ 'binary', 'tar.gz' ]
name_template: >-
{{ .Binary }}_{{ .Version }}_
{{- if eq .Os "darwin" }}macOS
{{- else }}{{ .Os }}{{ end }}_{{ .Arch }}
{{- with .Arm }}v{{ . }}{{ end }}
{{- with .Mips }}_{{ . }}{{ end }}
{{- if not (eq .Amd64 "v1") }}{{ .Amd64 }}{{end}}
nfpms:
- license: MIT
maintainer: "criyle"
formats:
- apk
- deb
- rpm
# nix:
# - name: go-judge
# license: "mit"
dockers_v2:
- images:
- criyle/go-judge
dockerfile: "Dockerfile.goreleaser"
tags:
- "v{{ .Version }}"
- "{{ if .IsNightly }}nightly{{ end }}"
- "{{ if not .IsNightly }}latest{{ end }}"
extra_files:
- "mount.yaml"
platforms:
- linux/amd64
- linux/arm64
- linux/arm/v7
#- linux/arm/v5 disable for now: https://github.com/orgs/goreleaser/discussions/6005
- linux/ppc64le
- linux/s390x
- linux/riscv64
checksum:
name_template: "checksums.txt"
snapshot:
version_template: "{{ incpatch .Version }}-next"
changelog:
sort: asc
filters:
exclude:
- "^docs:"
- "^test:"
release:
github:
owner: criyle
name: go-judge

19
CITATION.cff Normal file

@ -0,0 +1,19 @@
cff-version: 1.2.0
title: "go-judge: High Performance Sandbox Service"
message: >-
If you use or benchmark this software, please cite
it using the metadata from this file.
type: software
authors:
- given-names: Yang
family-names: Gao
email: i@goj.ac
- name: All go-judge Contributors
repository-code: 'https://github.com/criyle/go-judge'
url: 'https://docs.goj.ac'
keywords:
- Online Judge
- Competitive Programming
- Sandbox
- Linux Container
license: MIT


@ -1,26 +0,0 @@
FROM golang:alpine AS build
WORKDIR /go/judge
# CGO need g++, CGO is needed to compile c-shared
RUN apk --no-cache add build-base
COPY go.mod go.sum /go/judge/
RUN go mod download
COPY ./ /go/judge
RUN go build -o executorserver ./cmd/executorserver && \
go build -o cinit ./cmd/cinit && \
go build -buildmode=c-shared -o executor_server.so ./cmd/ffi/
FROM alpine:latest
WORKDIR /opt
COPY --from=build /go/judge/executorserver /go/judge/mount.yaml /opt/
EXPOSE 5050/tcp 5051/tcp
ENTRYPOINT ["./executorserver"]


@ -1,23 +0,0 @@
FROM golang:latest AS build
WORKDIR /go/judge
COPY go.mod go.sum /go/judge/
RUN go mod download
COPY ./ /go/judge
RUN go build -o executorserver ./cmd/executorserver \
&& go build -o cinit ./cmd/cinit \
&& go build -buildmode=c-shared -o executor_server.so ./cmd/ffi/
FROM debian:latest
WORKDIR /opt
COPY --from=build /go/judge/executorserver /go/judge/mount.yaml /opt/
EXPOSE 5050/tcp 5051/tcp
ENTRYPOINT ["./executorserver"]

5
Dockerfile.goreleaser Normal file

@ -0,0 +1,5 @@
FROM debian:latest
ARG TARGETPLATFORM
WORKDIR /opt
ENTRYPOINT [ "/opt/go-judge" ]
COPY $TARGETPLATFORM/go-judge mount.yaml /opt/

164
README.cn.md Normal file

@ -0,0 +1,164 @@
# go-judge
[![Go Reference](https://pkg.go.dev/badge/github.com/criyle/go-judge.svg)](https://pkg.go.dev/github.com/criyle/go-judge) [![Go Report Card](https://goreportcard.com/badge/github.com/criyle/go-judge)](https://goreportcard.com/report/github.com/criyle/go-judge) [![Release](https://img.shields.io/github/v/tag/criyle/go-judge)](https://github.com/criyle/go-judge/releases/latest) ![Build](https://github.com/criyle/go-judge/workflows/Build/badge.svg)
[English](README.md) | [文档](https://docs.goj.ac/cn)
快速,简单,安全
## 快速上手
### 安装和运行
下载对应平台预编译二进制文件 `go-judge` [Release](https://github.com/criyle/go-judge/releases) 并在终端开启
或者使用 docker
```bash
docker run -it --rm --privileged --shm-size=256m -p 5050:5050 --name=go-judge criyle/go-judge
```
### REST API 接口
沙箱服务提供 REST API 接口来在受限制的环境中运行程序(默认监听于 `localhost:5050`)。
- **POST /run 在受限制的环境中运行程序**
- GET /file 得到所有在文件存储中的文件 ID 到原始命名映射
- POST /file 上传一个文件到文件存储,返回一个文件 ID 用于提供给 /run 接口
- GET /file/:fileId 下载文件 ID 指定的文件
- DELETE /file/:fileId 删除文件 ID 指定的文件
- /ws /run 接口的 WebSocket 版
- /stream 运行交互式命令。支持流式 api
- /version 获取构建的 Git 版本 (例如 v1.9.0) 以及运行时信息 (go 版本, 操作系统, 平台)
- /config 获取部分配置信息 (例如 fileStorePath, runnerConfig) 以及支持的功能特性
### REST API 接口定义
[接口数据类型定义](https://docs.goj.ac/cn/api#rest-api-接口定义)
### 示例
请使用 postman 或其他 REST API 调试工具向 http://localhost:5050/run 发送请求
[请求实例](https://docs.goj.ac/cn/example)
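For a quick smoke test without Postman, a minimal Go client could be sketched as below (assumptions: the default `localhost:5050` address and the `cmd` request shape shown in the examples later in this page; error handling is kept minimal):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Run /bin/cat on a copied-in file and collect stdout / stderr.
	body := []byte(`{
  "cmd": [{
    "args": ["/bin/cat", "a.txt"],
    "env": ["PATH=/usr/bin:/bin"],
    "files": [{"content": ""}, {"name": "stdout", "max": 10240}, {"name": "stderr", "max": 10240}],
    "cpuLimit": 10000000000,
    "memoryLimit": 104857600,
    "procLimit": 50,
    "copyIn": {"a.txt": {"content": "hello"}}
  }]
}`)
	resp, err := http.Post("http://localhost:5050/run", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	out, _ := io.ReadAll(resp.Body)
	fmt.Println(string(out)) // JSON array: status, exitStatus, time, memory, files, ...
}
```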
## 进阶设置
### 运行要求
- Linux 内核版本 >= 3.10
- 系统 Cgroup 文件系统挂载于 `/sys/fs/cgroup`(Systemd 默认)
### 系统架构
```text
+----------------------------------------------------------------------------+
| 传输层 (HTTP / WebSocket / FFI / ...) |
+----------------------------------------------------------------------------+
| 工作协程 (运行环境池 和 运行环境生产者 ) |
+-----------------------------------------------------------+----------------+
| 运行环境 | 文件存储 |
+--------------------+----------------+---------------------+----------+-----+
| Linux (go-sandbox) | Windows (winc) | macOS (app sandbox) | 共享内存 | 磁盘 |
+--------------------+----------------+---------------------+----------+-----+
```
### 配置
服务相关:
- 默认监听地址是 `localhost:5050`,使用 `-http-addr` 指定
- 默认 gRPC 接口处于关闭状态,使用 `-enable-grpc` 开启
- 默认 gRPC 监听地址是 `localhost:5051` ,使用 `-grpc-addr` 指定
- 默认日志等级是 info ,使用 `-silent` 关闭 或 使用 `-release` 开启 release 级别日志(在 docker 中会自动开启)
- 默认没有开启鉴权,使用 `-auth-token` 指定令牌鉴权
- 默认没有开启 go 语言调试接口(`localhost:5052/debug`),使用 `-enable-debug` 开启,同时将日志层级设为 Debug
- 默认没有开启 prometheus 监控接口,使用 `-enable-metrics` 开启 `localhost:5052/metrics`
- 在启用 go 语言调试接口或者 prometheus 监控接口的情况下,默认监控接口为 `localhost:5052`,使用 `-monitor-addr` 指定
沙箱相关:
- 默认同时运行任务数为和 CPU 数量相同,使用 `-parallelism` 指定
- 使用 `-mount-conf` 指定沙箱文件系统挂载细节,详细请参见 [文件系统挂载](https://docs.goj.ac/cn/mount) (仅 Linux)
- 使用 `-file-timeout` 指定文件存储文件最大时间。超出时间的文件将会删除。(例如指定 `30m` 时,缓存文件将在创建后 30 分钟删除)
- 默认文件存储在共享内存文件系统中(`/dev/shm/`),可以使用 `-dir` 指定另外的本地目录为文件存储
- 默认最大输出限制为 `256MiB`,使用 `-output-limit` 指定 POSIX rlimit 的输出限制
- 默认最大 `copyOut` 文件大小为 `64MiB` ,使用 `-copy-out-limit` 指定
可以[在此查看更多配置文档](https://docs.goj.ac/cn/configuration)。
### 指标监控
[Prometheus 指标监控接口](https://docs.goj.ac/cn/api#prometheus-监控接口)
### 在沙箱中运行终端
从 [Release](https://github.com/criyle/go-judge/releases) 下载 `go-judge-shell` 。运行将连接本地 `go-judge` 沙箱服务并开启一个容器内的终端用于调试。
### /run 接口返回状态
- Accepted: 程序在资源限制内正常退出
- Memory Limit Exceeded: 超出内存限制
- Time Limit Exceeded: (通常 `exitStatus` 为 `9`(超时时被 `SIGKILL` 结束))
- 超出 `timeLimit` 时间限制
- 或者超过 `clockLimit` 等待时间限制
- Output Limit Exceeded:
- 超出 `pipeCollector` 限制
- 或者超出 `-output-limit` 最大输出限制
- File Error:
- `copyIn` 指定文件不存在
- 或者 `copyIn` 指定文件大小超出沙箱文件系统限制
- 或者 `copyOut` 指定文件不存在
- Non Zero Exit Status: 程序用非 0 返回值退出
- Signalled: 程序收到结束信号而退出(例如 `SIGSEGV`)
- Dangerous Syscall: 程序被 `seccomp` 过滤器结束(默认不启用)
- Internal Error:
- 指定程序路径不存在
- 或者容器创建失败(比如使用非特权 docker)
- 或者其他错误
### 容器的文件系统
在 Linux 平台,默认只读挂载点包括主机的 `/lib`, `/lib64`, `/usr`, `/bin`, `/etc/ld.so.cache`, `/etc/alternatives`, `/etc/fpc.cfg`, `/dev/null`, `/dev/urandom`, `/dev/random`, `/dev/zero`, `/dev/full` 和临时文件系统 `/w`, `/tmp` 以及 `/proc`
使用 `mount.yaml` [定制容器文件系统](https://docs.goj.ac/cn/mount#%E8%87%AA%E5%AE%9A%E4%B9%89%E6%8C%82%E8%BD%BD)。
不使用 `mount.yaml` 时,`/w` 和 `/tmp` 挂载的 `tmpfs` 大小通过 `-tmp-fs-param` 指定,默认值为 `size=128m,nr_inodes=4k`
如果在容器的根目录存在 `/.env` 文件,那么这个文件会在容器创建时被载入。文件的每一行会作为环境变量的初始值加入到运行程序当中。
如果之后指定的挂载点目标在之前的挂载点之下,那么需要保证之前的挂载点存在目标文件或者文件夹。
### 注意
> [!WARNING]
> Windows 和 macOS 平台为实验性支持,请不要在生产环境使用
#### 使用 cgroup
在 cgroup v1 系统上,`go-judge` 需要 `root` 权限创建 `cgroup`。请使用 `sudo` 以 `root` 用户运行,或者确保运行用户拥有以下目录的读写权限:`/sys/fs/cgroup/cpuacct/gojudge`, `/sys/fs/cgroup/memory/gojudge`, `/sys/fs/cgroup/pids/gojudge`
在 cgroup v2 系统上,`go-judge` 会和 `system dbus` 沟通,创建一个临时 `scope`。如果 `systemd` 不存在,并且拥有 `root` 权限,那么将尝试进行嵌套初始化。
如果没有 `cgroup` 的权限,那么 `cgroup` 相关的资源配置将不会生效。
#### cgroup v2
`go-judge` 目前已经支持 cgroup v2,鉴于越来越多的 Linux 发行版默认启用 cgroup v2 而不是 v1(比如 Ubuntu 21.10+、Fedora 31+)。然而,对于内核版本小于 5.19 的版本,因为 cgroup v2 在内存控制器里面缺少 `memory.max_usage_in_bytes`,内存使用量计数会转而采用 `maxrss` 指标。这项指标会显示得比使用 cgroup v1 时候要稍多,在运行使用内存较少的程序时比较明显。对于内核版本大于或等于 5.19 的版本,`memory.peak` 会被采用。
同时,如果本程序在容器中运行,容器中的进程会被移到 /api cgroup v2 层级中来开启 cgroup v2 嵌套支持。
`systemd``init` 的发行版中运行时,`go-judge` 会使用 `dbus` 通知 `systemd` 来创建一个临时 `scope` 作为 `cgroup` 的根。
在高于 5.7 的内核中运行时,`go-judge` 会尝试更快的 `clone3(CLONE_INTO_CGROUP)``vfork` 方法.
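To make the accounting rule above concrete, here is a small hedged sketch (the cgroup path and helper are hypothetical; it only illustrates preferring `memory.peak` on kernel >= 5.19 and falling back to `maxrss` otherwise):

```go
//go:build linux

package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
	"syscall"
)

// peakMemory prefers cgroup v2 memory.peak (kernel >= 5.19) and falls back to
// the maxrss reported by getrusage/wait4, mirroring the behaviour described
// above. The cgroup directory below is a hypothetical example.
func peakMemory(cgroupDir string, ru *syscall.Rusage) (uint64, error) {
	if b, err := os.ReadFile(cgroupDir + "/memory.peak"); err == nil {
		return strconv.ParseUint(strings.TrimSpace(string(b)), 10, 64)
	}
	return uint64(ru.Maxrss) * 1024, nil // Maxrss is reported in KiB on Linux
}

func main() {
	var ru syscall.Rusage
	_ = syscall.Getrusage(syscall.RUSAGE_SELF, &ru)
	m, _ := peakMemory("/sys/fs/cgroup/gojudge", &ru)
	fmt.Println("peak memory (bytes):", m)
}
```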
#### 内存使用
控制进程通常会使用 `20M` 内存。每个容器进程通常会占用 `20M` 内存 + 临时文件系统 (tmpfs) 大小 `2 * 128M`。对于每个请求,它将占用 用户程序限制的最大内存 + 额外限制 (`16k`) + 总 copy out 最大限制。请注意,缓存文件存储在宿主机的共享内存中 (`/dev/shm`),因此请确保分配了足够的空间。
比方说当并发数(concurrency)为 4 时,容器本身可能占用高达 `60 + (20+32) * 4M = 268M` + 4 \* 总 copy out 限制 + 总请求的最大内存限制。
由于 Go 语言运行时(runtime)的限制,内存并不会自动返回给操作系统,这可能会导致 OOM Killer 杀死进程。因此引入了一个后台工作线程,用于检查堆内存使用情况并在必要时调用垃圾收集(GC):
- `-force-gc-target` 默认 `20m`,触发 GC 的最小堆内存使用量
- `-force-gc-interval` 默认 `5s`,检查内存使用情况的间隔时间

701
README.md

@ -1,635 +1,162 @@
# go-judge # go-judge
[![GoDoc](https://godoc.org/github.com/criyle/go-judge?status.svg)](https://godoc.org/github.com/criyle/go-judge) [![Go Report Card](https://goreportcard.com/badge/github.com/criyle/go-judge)](https://goreportcard.com/report/github.com/criyle/go-judge) [![Release](https://img.shields.io/github/v/tag/criyle/go-judge)](https://github.com/criyle/go-judge/releases/latest) ![Build](https://github.com/criyle/go-judge/workflows/Build/badge.svg) [![Go Reference](https://pkg.go.dev/badge/github.com/criyle/go-judge.svg)](https://pkg.go.dev/github.com/criyle/go-judge) [![Go Report Card](https://goreportcard.com/badge/github.com/criyle/go-judge)](https://goreportcard.com/report/github.com/criyle/go-judge) [![Release](https://img.shields.io/github/v/tag/criyle/go-judge)](https://github.com/criyle/go-judge/releases/latest) ![Build](https://github.com/criyle/go-judge/workflows/Build/badge.svg)
## Executor Service [中文文档](README.cn.md) | [Documentation](https://docs.goj.ac)
Fast, Simple, Secure
## Quick Start
### Install & Run
Download compiled executable `go-judge` for your platform from [Release](https://github.com/criyle/go-judge/releases) and run.
Or, by docker
```bash
docker run -it --rm --privileged --shm-size=256m -p 5050:5050 --name=go-judge criyle/go-judge
```
### REST API
A REST service to run program in restricted environment (Listening on `localhost:5050` by default).
- **POST /run execute program in the restricted environment**
- GET /file list all cached file id to original name map
- POST /file prepare a file in the go judge (in memory), returns fileId (can be referenced in /run parameter)
- GET /file/:fileId downloads file from go judge (in memory), returns file content
- DELETE /file/:fileId delete file specified by fileId
- /ws WebSocket version for /run
- /stream WebSocket for stream run. Supports streaming interface
- GET /version gets build git version (e.g. `v1.9.0`) together with runtime information (go version, os, platform)
- GET /config gets some configuration (e.g. `fileStorePath`, `runnerConfig`) together with some supported features
### REST API Interface
[API Interface Structure Definition](https://docs.goj.ac/api#rest-api-interface)
### Example Request & Response
[Example Request & Response](https://docs.goj.ac/example)
## Documentation
### Prerequisite
- Linux Kernel Version >= 3.10
- Cgroup file system mounted at /sys/fs/cgroup. Usually done by systemd
### Architecture ### Architecture
#### Overall Architecture
```text ```text
+----------------------------------------------------------------------------------+ +----------------------------------------------------------------------------------+
| Transport Layer (HTTP / WebSocket / FFI / ...) | | Transport Layer (HTTP / WebSocket / FFI / ...) |
+----------------------------------------------------------------------------------+ +----------------------------------------------------------------------------------+
| Executor Worker | | Sandbox Worker (Environment Pool w/ Environment Builder ) |
+-----------------------------------------------------------+----------------------+ +-----------------------------------------------------------+----------------------+
| EnvExec + Environment Pool + Environment Builder | File Store | | EnvExec | File Store |
+--------------------+----------------+---------------------+--------+-------+-----+ +--------------------+----------------+---------------------+---------------+------+
| Linux (go-sandbox) | Windows (winc) | macOS (app sandbox) | Memory | Local | ... | | Linux (go-sandbox) | Windows (winc) | macOS (app sandbox) | Shared Memory | Disk |
+--------------------+----------------+---------------------+--------+-------+-----+ +--------------------+----------------+---------------------+---------------+------+
``` ```
A rest service to run program in restricted environment and it is basically a wrapper for `pkg/envexec` to run single / multiple programs. ### Configurations
- /run POST execute program in the restricted environment Server:
- /file GET list all cached file
- /file POST prepare a file in the executor service (in memory), returns fileId (can be referenced in /run parameter)
- /file/:fileId GET downloads file from executor service (in memory), returns file content
- /file/:fileId DELETE delete file specified by fileId
- /ws WebSocket for /run
- /metrics prometheus metrics (specifies `METRICS=1` environment variable to enable metrics)
- /debug (specifies `DEBUG=1` environment variable to enable go runtime debug endpoint)
- /version gets build git version (e.g. `v0.6.4-1-g20d2815`) together with runtime information (go version, os, platform)
### Install & Run Developing Server - The default binding address for the go judge is `localhost:5050`. Can be specified with `-http-addr` flag.
- By default gRPC endpoint is disabled, to enable gRPC endpoint, add `-enable-grpc` flag.
Install GO 1.13+ from [download](https://golang.org/dl/) - The default binding address for the gRPC go judge is `localhost:5051`. Can be specified with `-grpc-addr` flag.
- The default log level is info, use `-silent` to disable logs or use `-release` to enable release logger (auto turn on if in docker).
```bash
go get github.com/criyle/go-judge/cmd/executorserver
~/go/bin/executorserver # or executorserver if $(GOPATH)/bin is in your $PATH
```
Or, by docker
```bash
docker run -it --rm --privileged -p 5050:5050 criyle/executorserver:demo
```
Build by your own `docker build -t executorserver -f Dockerfile.exec .`
The `executorserver` need root privilege to create `cgroup`. Either creates sub-directory `/sys/fs/cgroup/cpuacct/executor_server`, `/sys/fs/cgroup/memory/executor_server`, `/sys/fs/cgroup/pids/executor_server` and make execution user readable or use `sudo` to run it.
#### Command Line Arguments
- The default binding address for the executor server is `:5050`. Can be specified with `-http-addr` flag.
- The default binding address for the gRPC executor server is `:5051`. Can be specified with `-grpc-addr` flag. (Notice: need to set `ES_ENABLE_GRPC=1` environment variable to enable GRPC endpoint)
- The default concurrency is `4`, Can be specified with `-parallelism` flag.
- The default file store is in memory, local cache can be specified with `-dir` flag.
- The default log level is debug, use `-silent` to disable logs or use `-release` to enable release logger (auto turn on if in docker).
- The default CGroup prefix is `executor_server`, Can be specified with `-cgroup-prefix` flag.
- `-auth-token` to add token-based authentication to REST / gRPC - `-auth-token` to add token-based authentication to REST / gRPC
- `-src-prefix` to restrict `src` copyIn path (need to be absolute path) - By default, the GO debug endpoints (`localhost:5052/debug`) are disabled, to enable, specifies `-enable-debug`, and it also enables debug log
- `-time-limit-checker-interval` specifies time limit checker interval (default 100ms) (valid value: \[1ms, 1s\]) - By default, the prometheus metrics endpoints (`localhost:5052/metrics`) are disabled, to enable, specifies `-enable-metrics`
- `-output-limit` specifies size limit of POSIX rlimit of output - Monitoring HTTP endpoint is enabled if metrics / debug is enabled, the default addr is `localhost:5052` and can be specified by `-monitor-addr`
- `-cpuset` specifies `cpuset.cpus` cgroup for each container
- `-container-cred-start` specifies container `setuid` / `setgid` credential start point (default: 10000)
- for example, by default container 0 will run with 10001 uid & gid and container 1 will run with 10002 uid & gid...
- `-enable-cpu-rate` enabled `cpu` cgroup to control cpu rate using cfs_quota & cfs_period control
- `-cpu-cfs-period` specifies cfs_period if cpu rate is enabled (default 100ms) (valid value: \[1ms, 1s\])
#### Environment Variables Sandbox:
Environment variable will be override by command line arguments if they both present. All command line arguments have its correspond environment variable. - The default concurrency equal to number of CPU, Can be specified with `-parallelism` flag.
- `-mount-conf` specifies detailed mount configuration, please refer [File System Mount](https://docs.goj.ac/mount) as a reference (Linux only)
- `-file-timeout` specifies maximum TTL for file created in file store e.g. `30m`)
- The default file store is in memory(`/dev/shm/`), local cache can be specified with `-dir` flag.
- `-output-limit` specifies size limit of POSIX rlimit of output (default 256MiB)
- `-copy-out-limit` specifies the default file copy out max (default 64MiB)
- The http binding address specifies as `ES_HTTP_ADDR=addr` You can find [more available configuration here](https://docs.goj.ac/configuration).
- The grpc binding address specifies as `ES_GRPC_ADDR=addr`
- The parallelism specifies as `ES_PARALLELISM=4`
- The token specifies as `ES_AUTH_TOKEN=token`
- `ES_ENABLE_GRPC=1` enables gRPC
- `ES_ENABLE_METRICS=1` enables metrics
- `ES_ENABLE_DEBUG=1` enables debug
### Build Shared object ### Run Terminal in the Container
Build container init `cinit`: Download `go-judge-shell` from [Release](https://github.com/criyle/go-judge/releases) and run. It will connect local `go-judge`, and open an interactive shell in the container for debugging purpose.
`go build -o cinit ./cmd/cinit` ### Return Status
Build `executor_server.so`: - Accepted: Program exited with status code 0 within time & memory limits
- Memory Limit Exceeded: Program uses more memory than memory limits
`go build -buildmode=c-shared -o executor_server.so ./cmd/ffi/` - Time Limit Exceeded: (`exitStatus` usually have value `9` as killed by `SIGKILL` after timeout)
- Program uses more CPU time than cpuLimit
For example, in JavaScript, run with `ffi-napi` (seems node 14 is not supported yet): - Or, program uses more clock time than clockLimit
- Output Limit Exceeded:
### Build Executor Proxy - Program output more than pipeCollector limits
- Or, program output more than output-limit
Build `go build ./cmd/executorproxy` - File Error:
- CopyIn file is not existed
Run `./executorproxy`, connect to gRPC endpoint and offers REST endpoint. - Or, CopyIn file too large for container file system
- Or, CopyOut file is not existed after program exited
### Build Executor Shell - Non Zero Exit Status: Program exited with non 0 status code within time & memory limits
- Signalled: Program exited with signal (e.g. `SIGSEGV`)
Build `go build ./cmd/executorshell` - Dangerous Syscall: Program killed by seccomp filter (not enabled by default)
- Internal Error:
Run `./executorshell`, connect to gRPC endpoint with interactive shell. - Program is not exist
- Or, container create not successful (e.g. not privileged docker)
- Or, other errors
### Container Root Filesystem ### Container Root Filesystem
- [x] necessary lib / exec / compiler / header readonly bind mounted from current file system: /lib /lib64 /bin /usr For linux platform, the default mounts points are bind mounting host's `/lib`, `/lib64`, `/usr`, `/bin`, `/etc/ld.so.cache`, `/etc/alternatives`, `/etc/fpc.cfg`, `/dev/null`, `/dev/urandom`, `/dev/random`, `/dev/zero`, `/dev/full` and mounts tmpfs at `/w`, `/tmp` and creates `/proc`.
- [x] work directory tmpfs mount: /w (work dir), /tmp (compiler temp files)
The following mounts point are examples that can be configured through config file later To [customize mount points](https://docs.goj.ac/mount#customization), please look at example `mount.yaml` file.
- additional compiler scripts / exec readonly bind mounted: /c If `mount.yaml` is not specified, the size of `tmpfs` for `/w` and `/tmp` is configured through `-tmp-fs-param` with default value `size=128m,nr_inodes=4k`
- additional header readonly bind mounted: /i
### Utilities If a file named `/.env` exists in the container rootfs, the container will load the file as environment variable line by line.
- pkg/envexec: run single / group of programs in parallel within restricted environment and resource constraints If a bind mount is specifying a target within the previous mounted one, please ensure the target exists in the previous mount point.
- pkg/pool: reference implementation for Cgroup & Environment Pool
### Windows Support ### Metrics Monitoring Endpoint
Build `executorserver` by: [Prometheus Metrics Monitoring Endpoint](https://docs.goj.ac/api#prometheus-monitor-api)
`go build ./cmd/executorserver/` ### Notice
Build `executor_server.dll`: (need to install `gcc` as well) > [!WARNING]
> Window and macOS support are experimental and should not be used in production environments
`go build -buildmode=c-shared -o executor_server.so ./cmd/ffi/` #### cgroup usage
Run: `./executorserver` For cgroup v1, the `go-judge` need root privilege to create `cgroup`. Either creates sub-directory `/sys/fs/cgroup/cpuacct/gojudge`, `/sys/fs/cgroup/memory/gojudge`, `/sys/fs/cgroup/pids/gojudge` and make execution user readable or use `sudo` to run it.
#### Windows Security For cgroup v2, systemd dbus will be used to create a transient scope for cgroup integration.
- Resources are limited by [JobObject](https://docs.microsoft.com/en-us/windows/win32/procthread/job-objects) If no permission to create cgroup, the cgroup related limit will not be effective.
- Privillege are limited by [Restricted Low Mandatory Level Token](https://docs.microsoft.com/en-us/windows/win32/secauthz/access-tokens)
- Low Mandatory Level directory is created for read / write
### MacOS Support #### cgroup v2 support
Build `executorserver` by: The cgroup v2 is supported by `go-judge` now when running as root since more Linux distribution are enabling cgroup v2 by default (e.g. Ubuntu 21.10+, Fedora 31+). However, for kernel < 5.19, due to missing `memory.max_usage_in_bytes` in `memory` controller, the memory usage is now accounted by `maxrss` returned by `wait4` syscall. Thus, the memory usage appears higher than those who uses cgroup v1. For kernel >= 5.19, `memory.peak` is being used.
`go build ./cmd/executorserver/` When running in containers, the `go-judge` will migrate all processed into `/api` hierarchy to enable nesting support.
Build `executor_server.dylib`: (need to install `XCode`) When running in Linux distributions powered by `systemd`, the `go-judge` will contact `systemd` via `dbus` to create a transient scope as cgroup root.
`go build -buildmode=c-shared -o executor_server.dylib ./cmd/ffi/` When running with kernel >= 5.7, the `go-judge` will try faster `clone3(CLONE_INTO_CGROUP)` and `vfork` method.
Run: `./executorserver` #### Memory Usage
#### MacOS Security The controller usually consumes `20M` memory and each container usually consumes `20M` + size of tmpfs `2 * 128M`. For each request, it consumes as much as user program limit + extra limit (`16k`) + total copy out max. Notice that the cached file stores in the shared memory (`/dev/shm`) of the host, so please ensure enough size allocated.
- `sandbox-init` profile deny network access and file read / write For example, when concurrency = 4, the container itself can consume as much as `60 + (20+32) * 4M = 268M` + 4 * total copy out + total max memory of requests.
### Benchmark Due to limitation of GO runtime, the memory will not return to OS automatically, which could lead to OOM killer. A background worker was introduced to checks heap usage and invokes GC when necessary.
By `wrk` with `t.lua`: - `-force-gc-target` default `20m`, the minimal size to trigger GC
- `-force-gc-interval` default `5s`, the interval to check memory usage
- Tested single thread ~140-160 op/s macOS Docker Desktop & ~400-460 op/s Windows 10 WSL2.
- Tested multi thread ~1100-1200 op/s Windows 10 WSL2
```lua
wrk.method = "POST"
wrk.body = '{"cmd":[{"args":["/bin/cat","a.hs"],"env":["PATH=/usr/bin:/bin"],"files":[{"content":""},{"name":"stdout","max":10240},{"name":"stderr","max":10240}],"cpuLimit":10000000000,"memoryLimit":104857600,"procLimit":50,"copyIn":{"a.hs":{"content":"main = putStrLn \\"Hello, World!\\""},"b":{"content":"TEST"}}}]}'
wrk.headers["Content-Type"] = "application/json;charset=UTF-8"
```
`wrk -s t.lua -c 1 -t 1 -d 30s --latency http://localhost:5050/run`
e.g.:
```text
Running 30s test @ http://localhost:5050/run
1 threads and 1 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 2.17ms 446.05us 22.37ms 93.88%
Req/Sec 463.26 26.15 500.00 78.67%
Latency Distribution
50% 2.08ms
75% 2.27ms
90% 2.50ms
99% 3.13ms
13846 requests in 30.01s, 3.65MB read
Requests/sec: 461.30
Transfer/sec: 124.38KB
```
## TODO
- [x] Github actions to auto build
- [x] Configure mounts using YAML config file
- [x] Investigate root-free running mechanism (no cgroup && not set uid / gid)
- [x] Investigate RLimit settings (cpu, data, fsize, stack, noFile)
- [x] Add WebSocket for job submission
- [x] Windows support
- [x] MacOS support
- [x] GRPC + protobuf support
- [x] Token-based authentication
- [x] Prometheus metrics support
- [x] Customize container workDir, hostName & domainName
## API interface
```typescript
interface LocalFile {
src: string; // absolute path for the file
}
interface MemoryFile {
content: string | Buffer; // file contents
}
interface PreparedFile {
fileId: string; // fileId defines file uploaded by /file
}
interface Pipe {
name: string; // file name in copyOut
max: number; // maximum bytes to collect from pipe
}
interface Cmd {
args: string[]; // command line argument
env?: string[]; // environment
// specifies file input / pipe collector for program file descriptors
files?: (LocalFile | MemoryFile | PreparedFile | Pipe | null)[];
tty?: boolean; // enables tty on the input and output pipes (should have just one input & one output)
// Notice: must have TERM environment variables (e.g. TERM=xterm)
// limitations
cpuLimit?: number; // ns
realCpuLimit?: number; // ns
memoryLimit?: number; // byte
stackLimit?: number; // byte (N/A on windows, macOS cannot set over 32M)
procLimit?: number;
// copy the correspond file to the container dst path
copyIn?: {[dst:string]:LocalFile | MemoryFile | PreparedFile};
// copy out specifies files need to be copied out from the container after execution
copyOut?: string[];
// similar to copyOut but stores file in executor service and returns fileId, later download through /file/:fileId
copyOutCached?: string[];
// specifies the directory to dump container /w content
copyOutDir: string
// specifies the max file size to copy out
copyOutMax: number; // byte
}
enum Status {
Accepted, // normal
MemoryLimitExceeded, // mle
TimeLimitExceeded, // tle
OutputLimitExceeded, // ole
FileError, // fe
RuntimeError, // re
DangerousSyscall, // dgs
InternalError, // system error
}
interface PipeIndex {
index: number; // the index of cmd
fd: number; // the fd number of cmd
}
interface PipeMap {
in: PipeIndex; // input end of the pipe
out: PipeIndex; // output end of the pipe
}
interface Request {
requestId?: string; // for WebSocket requests
cmd: Cmd[];
pipeMapping: PipeMap[];
}
interface Result {
status: Status;
error?: string; // potential system error message
time: number; // ns (cgroup recorded time)
memory: number; // byte
runTime: number; // ns (wall clock time)
// copyFile name -> content
files?: {[name:string]:string};
// copyFileCached name -> fileId
fileIds?: {[name:string]:string};
}
// WebSocket results
interface WSResult {
requestId: string;
results: []Result;
error?: string;
}
```
### Example Request & Response
FFI:
```javascript
var ffi = require('ffi-napi');
var executor_server = ffi.Library('./executor_server', {
'Init': ['int', ['string']],
'Exec': ['string', ['string']],
'FileList': ['string', []],
'FileAdd': ['string', ['string']],
'FileGet': ['string', ['string']],
'FileDelete': ['string', ['string']]
});
if (executor_server.Init(JSON.stringify({
cinitPath: "/judge/cinit",
parallelism: 4,
}))) {
console.log("Failed to init executor server");
}
const result = JSON.parse(executor_server.Exec(JSON.stringify({
"cmd": [{
"args": ["/bin/cat", "test.txt"],
"env": ["PATH=/usr/bin:/bin"],
"files": [{
"content": ""
}, {
"name": "stdout",
"max": 10240
}, {
"name": "stderr",
"max": 10240
}],
"cpuLimit": 10000000000,
"memoryLimit": 104857600,
"procLimit": 50,
"copyIn": {
"test.txt": {
"content": "TEST"
}
}
}]
})));
console.log(result);
// Async
executor_server.Exec.async(JSON.stringify({
"cmd": [{
"args": ["/bin/cat", "test.txt"],
"env": ["PATH=/usr/bin:/bin"],
"files": [{
"content": ""
}, {
"name": "stdout",
"max": 10240
}, {
"name": "stderr",
"max": 10240
}],
"cpuLimit": 10000000000,
"memoryLimit": 104857600,
"procLimit": 50,
"copyIn": {
"test.txt": {
"content": "TEST"
}
}
}]
}), (err, res) => {
if (err) throw err;
console.log(JSON.parse(res));
});
const fileAdd = (param) => new Promise((resolve, reject) => {
executor_server.FileAdd.async(JSON.stringify(param), (err, res) => {
if (err != null) { reject(err); } else { resolve(res); }
});
});
const fileList = () => new Promise((resolve, reject) => {
executor_server.FileList.async((err, res) => {
if (err != null && res == null) { reject(err); } else { resolve(JSON.parse(res)); }
});
});
const fileGet = (param) => new Promise((resolve, reject) => {
executor_server.FileGet.async(JSON.stringify(param), (err, res) => {
if (err != null && res == null) { reject(err); } else { resolve(res); }
});
});
const fileDelete = (param) => new Promise((resolve, reject) => {
executor_server.FileDelete.async(JSON.stringify(param), (err, res) => {
if (err != null && res == null) { reject(err); } else { resolve(res); }
});
});
const fileOps = async () => {
const fileId = await fileAdd({ name: 'Name', content: 'Content' });
console.log(fileId);
const list = await fileList();
console.log(list);
const file = await fileGet({ id: fileId });
console.log(file);
const d = await fileDelete({ id: fileId });
console.log(d);
const e = await fileList();
console.log(e);
};
fileOps();
```
Output:
```javascript
{
requestId: '',
results: [
{
status: 'Accepted',
exitStatus: 0,
time: 814048,
memory: 253952,
files: [Object]
}
]
}
```
Single (this example requires `apt install g++` inside the container):
```json
{
"cmd": [{
"args": ["/usr/bin/g++", "a.cc", "-o", "a"],
"env": ["PATH=/usr/bin:/bin"],
"files": [{
"content": ""
}, {
"name": "stdout",
"max": 10240
}, {
"name": "stderr",
"max": 10240
}],
"cpuLimit": 10000000000,
"memoryLimit": 104857600,
"procLimit": 50,
"copyIn": {
"a.cc": {
"content": "#include <iostream>\nusing namespace std;\nint main() {\nint a, b;\ncin >> a >> b;\ncout << a + b << endl;\n}"
}
},
"copyOut": ["stdout", "stderr"],
"copyOutCached": ["a.cc", "a"],
"copyOutDir": "1"
}]
}
```
```json
[
{
"status": "Accepted",
"exitStatus": 0,
"time": 303225231,
"memory": 32243712,
"runTime": 524177700,
"files": {
"stderr": "",
"stdout": ""
},
"fileIds": {
"a": "5LWIZAA45JHX4Y4Z",
"a.cc": "NOHPGGDTYQUFRSLJ"
}
}
]
```
Multiple (interaction problem):
```json
{
"cmd": [{
"args": ["/bin/cat", "1"],
"env": ["PATH=/usr/bin:/bin"],
"files": [{
"content": ""
}, null, {
"name": "stderr",
"max": 10240
}],
"cpuLimit": 1000000000,
"memoryLimit": 1048576,
"procLimit": 50,
"copyIn": {
"1": { "content": "TEST 1" }
},
"copyOut": ["stderr"]
},
{
"args": ["/bin/cat"],
"env": ["PATH=/usr/bin:/bin"],
"files": [null, {
"name": "stdout",
"max": 10240
}, {
"name": "stderr",
"max": 10240
}],
"cpuLimit": 1000000000,
"memoryLimit": 1048576,
"procLimit": 50,
"copyOut": ["stdout", "stderr"]
}],
"pipeMapping": [{
"in" : {"index": 0, "fd": 1 },
"out" : {"index": 1, "fd" : 0 }
}]
}
```
```json
[
{
"status": "Accepted",
"exitStatus": 0,
"time": 1545123,
"memory": 253952,
"runTime": 4148800,
"files": {
"stderr": ""
},
"fileIds": {}
},
{
"status": "Accepted",
"exitStatus": 0,
"time": 1501463,
"memory": 253952,
"runTime": 5897700,
"files": {
"stderr": "",
"stdout": "TEST 1"
},
"fileIds": {}
}
]
```
Compile On Windows (cygwin):
```json
{
"cmd": [{
"args": ["C:\\Cygwin\\bin\\g++", "a.cc", "-o", "a"],
"env": ["PATH=C:\\Cygwin\\bin;"],
"files": [{
"content": ""
}, {
"name": "stdout",
"max": 10240
}, {
"name": "stderr",
"max": 10240
}],
"cpuLimit": 10000000000,
"memoryLimit": 104857600,
"procLimit": 50,
"copyIn": {
"a.cc": {
"content": "#include <iostream>\n#include <signal.h>\n#include <unistd.h>\nusing namespace std;\nint main() {\nint a, b;\ncin >> a >> b;\ncout << a + b << endl;\n}"
}
},
"copyOutCached": ["a.exe"]
}]
}
```
```json
[
{
"status": "Accepted",
"exitStatus": 0,
"time": 140625000,
"memory": 36286464,
"files": {
"stderr": "",
"stdout": ""
},
"fileIds": {
"a.exe": "HLQH2OF4MXUUJBCB"
}
}
]
```
Infinite loop with cpu rate control:
```json
{
"cmd": [{
"args": ["/usr/bin/python3", "1.py"],
"env": ["PATH=/usr/bin:/bin"],
"files": [{"content": ""}, {"name": "stdout","max": 10240}, {"name": "stderr","max": 10240}],
"cpuLimit": 3000000000,
"realCpuLimit": 4000000000,
"memoryLimit": 104857600,
"procLimit": 50,
"cpuRate": 0.1,
"copyIn": {
"1.py": {
"content": "while True:\n pass"
}
}
}]
}
```
```json
[
{
"status": "Time Limit Exceeded",
"exitStatus": 9,
"time": 414803599,
"memory": 3657728,
"runTime": 4046054900,
"files": {
"stderr": "",
"stdout": ""
}
}
]
```


@ -1,7 +0,0 @@
package main
import "github.com/criyle/go-sandbox/container"
func main() {
container.Init()
}


@ -1,54 +0,0 @@
package main
import (
"net/http"
"runtime"
"github.com/criyle/go-judge/cmd/executorserver/model"
"github.com/criyle/go-judge/worker"
"github.com/gin-gonic/gin"
)
type cmdHandle struct {
worker worker.Worker
srcPrefix string
}
func (h *cmdHandle) handleRun(c *gin.Context) {
var req model.Request
if err := c.ShouldBindJSON(&req); err != nil {
c.Error(err)
c.AbortWithStatusJSON(http.StatusBadRequest, err.Error())
return
}
if len(req.Cmd) == 0 {
c.AbortWithStatusJSON(http.StatusBadRequest, "no cmd provided")
return
}
r, err := model.ConvertRequest(&req, h.srcPrefix)
if err != nil {
c.Error(err)
c.AbortWithStatusJSON(http.StatusBadRequest, err.Error())
return
}
logger.Sugar().Debugf("request: %+v", r)
rt := <-h.worker.Submit(c.Request.Context(), r)
logger.Sugar().Debugf("response: %+v", rt)
execObserve(rt)
if rt.Error != nil {
c.Error(rt.Error)
c.AbortWithStatusJSON(http.StatusInternalServerError, rt.Error.Error())
return
}
c.JSON(http.StatusOK, model.ConvertResponse(rt).Results)
}
func handleVersion(c *gin.Context) {
c.JSON(http.StatusOK, gin.H{
"buildVersion": Version,
"goVersion": runtime.Version(),
"platform": runtime.GOARCH,
"os": runtime.GOOS,
})
}


@ -1,65 +0,0 @@
package config
import (
"os"
"time"
"github.com/criyle/go-judge/pkg/envexec"
"github.com/koding/multiconfig"
)
// Config defines executor server configuration
type Config struct {
// container
ContainerInitPath string `flagUsage:"container init path"`
PreFork int `flagUsage:"control # of the prefork workers"`
TmpFsParam string `flagUsage:"tmpfs mount data (only for default mount with no mount.yaml)" default:"size=16m,nr_inodes=4k"`
NetShare bool `flagUsage:"share net namespace with host"`
MountConf string `flagUsage:"specifics mount configuration file" default:"mount.yaml"`
Parallelism int `flagUsage:"control the # of concurrency execution" default:"4"`
CgroupPrefix string `flagUsage:"control cgroup prefix" default:"executor_server"`
ContainerCredStart int `flagUsage:"control the start uid&gid for container" default:"10000"`
// file store
SrcPrefix string `flagUsage:"specifies directory prefix for source type copyin"`
Dir string `flagUsage:"specifies directory to store file upload / download (in memory by default)"`
// runner limit
TimeLimitCheckerInterval time.Duration `flagUsage:"specifies time limit checker interval" default:"100ms"`
ExtraMemoryLimit *envexec.Size `flagUsage:"specifies extra memory buffer for check memory limit" default:"16k"`
OutputLimit *envexec.Size `flagUsage:"specifies POSIX rlimit for output for each command" default:"256m"`
Cpuset string `flagUsage:"control the usage of cpuset for all containerd process"`
EnableCPURate bool `flagUsage:"enable cpu cgroup rate control"`
CPUCfsPeriod time.Duration `flagUsage:"set cpu.cfs_period" default:"100ms"`
// server config
HTTPAddr string `flagUsage:"specifies the http binding address" default:":5050"`
EnableGRPC bool `flagUsage:"enable gRPC endpoint"`
GRPCAddr string `flagUsage:"specifies the grpc binding address" default:":5051"`
AuthToken string `flagUsage:"bearer token auth for REST / gRPC"`
EnableDebug bool `flagUsage:"enable debug endpoint"`
EnableMetrics bool `flagUsage:"enable promethus metrics endpoint"`
// logger config
Release bool `flagUsage:"release level of logs"`
Silent bool `flagUsage:"do not print logs"`
}
// Load loads config from flag & environment variables
func (c *Config) Load() error {
cl := multiconfig.MultiLoader(
&multiconfig.TagLoader{},
&multiconfig.EnvironmentLoader{
Prefix: "ES",
CamelCase: true,
},
&multiconfig.FlagLoader{
CamelCase: true,
EnvPrefix: "ES",
},
)
if os.Getpid() == 1 {
c.Release = true
}
return cl.Load(c)
}


@ -1,3 +0,0 @@
package main
//go:generate go run genversion.go


@ -1,33 +0,0 @@
// +build ignore
package main
import (
"bytes"
"fmt"
"io/ioutil"
"log"
"os/exec"
"strings"
)
func main() {
var ret bytes.Buffer
c := exec.Command("git", "describe", "--tags")
c.Stdout = &ret
if err := c.Run(); err != nil {
log.Fatalln("run git describe", err)
}
var buf bytes.Buffer
fmt.Fprintln(&buf, "// Code generated by go generate; DO NOT EDIT.")
fmt.Fprintln(&buf)
fmt.Fprintln(&buf, "package main")
fmt.Fprintln(&buf)
fmt.Fprintf(&buf, "const Version = `%s`", strings.TrimSpace(ret.String()))
if err := ioutil.WriteFile("version.go", buf.Bytes(), 0644); err != nil {
log.Fatalln("write file", err)
}
}


@ -1,289 +0,0 @@
package main
import (
"context"
"fmt"
"os"
"path/filepath"
"strings"
"sync"
"github.com/creack/pty"
"github.com/criyle/go-judge/filestore"
"github.com/criyle/go-judge/pb"
"github.com/criyle/go-judge/worker"
)
const buffLen = 4096
var buffPool = sync.Pool{
New: func() interface{} {
return make([]byte, buffLen)
},
}
type execServer struct {
pb.UnimplementedExecutorServer
worker worker.Worker
fs filestore.FileStore
srcPrefix string
}
func (e *execServer) Exec(ctx context.Context, req *pb.Request) (*pb.Response, error) {
r, si, so, err := convertPBRequest(req, e.srcPrefix)
if err != nil {
return nil, err
}
if len(si) > 0 || len(so) > 0 {
return nil, fmt.Errorf("Stream in / out are not avaliable for exec request")
}
rt := <-e.worker.Submit(ctx, r)
execObserve(rt)
if rt.Error != nil {
return nil, err
}
return convertPBResponse(rt), nil
}
func (e *execServer) FileList(c context.Context, n *pb.Empty) (*pb.FileListType, error) {
return &pb.FileListType{
FileIDs: e.fs.List(),
}, nil
}
func (e *execServer) FileGet(c context.Context, f *pb.FileID) (*pb.FileContent, error) {
file := e.fs.Get(f.GetFileID())
content, err := file.Content()
if err != nil {
return nil, err
}
return &pb.FileContent{
Name: file.Name(),
Content: content,
}, nil
}
func (e *execServer) FileAdd(c context.Context, fc *pb.FileContent) (*pb.FileID, error) {
fid, err := e.fs.Add(fc.GetName(), fc.GetContent())
if err != nil {
return nil, err
}
return &pb.FileID{
FileID: fid,
}, nil
}
func (e *execServer) FileDelete(c context.Context, f *pb.FileID) (*pb.Empty, error) {
ok := e.fs.Remove(f.GetFileID())
if !ok {
return nil, fmt.Errorf("file id does not exists for %v", f.GetFileID())
}
return &pb.Empty{}, nil
}
func convertPBResponse(r worker.Response) *pb.Response {
res := &pb.Response{
RequestID: r.RequestID,
Results: make([]*pb.Response_Result, 0, len(r.Results)),
}
for _, c := range r.Results {
res.Results = append(res.Results, convertPBResult(c))
}
if r.Error != nil {
res.Error = r.Error.Error()
}
return res
}
func convertPBResult(r worker.Result) *pb.Response_Result {
return &pb.Response_Result{
Status: pb.Response_Result_StatusType(r.Status),
ExitStatus: int32(r.ExitStatus),
Error: r.Error,
Time: r.Time,
RunTime: r.RunTime,
Memory: r.Memory,
Files: r.Files,
FileIDs: r.FileIDs,
}
}
func convertPBRequest(r *pb.Request, srcPrefix string) (req *worker.Request, streamIn []*fileStreamIn, streamOut []*fileStreamOut, err error) {
defer func() {
if err != nil {
for _, fi := range streamIn {
fi.Close()
}
streamIn = nil
for _, fi := range streamOut {
fi.Close()
}
streamOut = nil
}
}()
req = &worker.Request{
RequestID: r.RequestID,
Cmd: make([]worker.Cmd, 0, len(r.Cmd)),
PipeMapping: make([]worker.PipeMap, 0, len(r.PipeMapping)),
}
for _, c := range r.Cmd {
cm, si, so, err := convertPBCmd(c, srcPrefix)
streamIn = append(streamIn, si...)
streamOut = append(streamOut, so...)
if err != nil {
return nil, streamIn, streamOut, err
}
req.Cmd = append(req.Cmd, cm)
}
for _, p := range r.PipeMapping {
pm := convertPBPipeMap(p)
req.PipeMapping = append(req.PipeMapping, pm)
}
return req, streamIn, streamOut, nil
}
func convertPBPipeMap(p *pb.Request_PipeMap) worker.PipeMap {
return worker.PipeMap{
In: worker.PipeIndex{
Index: int(p.GetIn().GetIndex()),
Fd: int(p.GetIn().GetFd()),
},
Out: worker.PipeIndex{
Index: int(p.GetOut().GetIndex()),
Fd: int(p.GetOut().GetFd()),
},
}
}
func convertPBCmd(c *pb.Request_CmdType, srcPrefix string) (cm worker.Cmd, streamIn []*fileStreamIn, streamOut []*fileStreamOut, err error) {
defer func() {
if err != nil {
for _, fi := range streamIn {
fi.Close()
}
streamIn = nil
for _, fi := range streamOut {
fi.Close()
}
streamOut = nil
}
}()
cm = worker.Cmd{
Args: c.GetArgs(),
Env: c.GetEnv(),
TTY: c.GetTty(),
CPULimit: c.GetCPULimit(),
RealCPULimit: c.GetRealCPULimit(),
MemoryLimit: c.GetMemoryLimit(),
StackLimit: c.GetStackLimit(),
ProcLimit: c.GetProcLimit(),
CPURateLimit: c.GetCPURateLimit(),
CopyOut: c.GetCopyOut(),
CopyOutCached: c.GetCopyOutCached(),
CopyOutMax: c.GetCopyOutMax(),
CopyOutDir: c.GetCopyOutDir(),
}
var (
fPty, fTty *os.File
ttyOut *fileStreamOut
)
for _, f := range c.GetFiles() {
var cf worker.CmdFile
switch fi := f.File.(type) {
case *pb.Request_File_StreamIn:
var si *fileStreamIn
if c.Tty {
fPty, fTty, err = pty.Open()
if err != nil {
return cm, streamIn, streamOut, err
}
si = &fileStreamIn{
name: fi.StreamIn.GetName(),
w: fPty,
r: fTty,
}
streamIn = append(streamIn, si)
} else {
si, err = newFileStreamIn(fi.StreamIn.GetName())
if err == nil {
streamIn = append(streamIn, si)
}
}
cf = si
case *pb.Request_File_StreamOut:
var so *fileStreamOut
if fPty != nil {
if ttyOut == nil {
ttyOut = &fileStreamOut{
name: fi.StreamOut.GetName(),
w: fTty,
r: fPty,
}
streamOut = append(streamOut, ttyOut)
}
so = ttyOut
} else {
so, err = newFileStreamOut(fi.StreamOut.GetName())
if err == nil {
streamOut = append(streamOut, so)
}
}
cf = so
default:
cf, err = convertPBFile(f, srcPrefix)
}
if err != nil {
return cm, streamIn, streamOut, err
}
cm.Files = append(cm.Files, cf)
}
if copyIn := c.GetCopyIn(); copyIn != nil {
cm.CopyIn = make(map[string]worker.CmdFile)
for k, f := range copyIn {
cf, err := convertPBFile(f, srcPrefix)
if err != nil {
return cm, streamIn, streamOut, err
}
cm.CopyIn[k] = cf
}
}
return cm, streamIn, streamOut, nil
}
func convertPBFile(c *pb.Request_File, srcPrefix string) (worker.CmdFile, error) {
switch c := c.File.(type) {
case nil:
return nil, nil
case *pb.Request_File_Local:
if srcPrefix != "" {
ok, err := checkPathPrefix(c.Local.GetSrc(), srcPrefix)
if err != nil {
return nil, err
}
if !ok {
return nil, fmt.Errorf("file (%s) does not under (%s)", c.Local.GetSrc(), srcPrefix)
}
}
return &worker.LocalFile{Src: c.Local.GetSrc()}, nil
case *pb.Request_File_Memory:
return &worker.MemoryFile{Content: c.Memory.GetContent()}, nil
case *pb.Request_File_Cached:
return &worker.CachedFile{FileID: c.Cached.GetFileID()}, nil
case *pb.Request_File_Pipe:
return &worker.PipeCollector{Name: c.Pipe.GetName(), Max: c.Pipe.GetMax()}, nil
}
return nil, fmt.Errorf("request file type not supported yet %v", c)
}
func checkPathPrefix(path, prefix string) (bool, error) {
if filepath.IsAbs(path) {
return strings.HasPrefix(filepath.Clean(path), prefix), nil
}
wd, err := os.Getwd()
if err != nil {
return false, err
}
return strings.HasPrefix(filepath.Join(wd, path), prefix), nil
}


@ -1,76 +0,0 @@
package main
import (
"fmt"
"os"
"github.com/criyle/go-judge/filestore"
)
type fileStreamIn struct {
name string
r, w *os.File
}
func newFileStreamIn(name string) (*fileStreamIn, error) {
r, w, err := os.Pipe()
if err != nil {
return nil, err
}
return &fileStreamIn{name: name, r: r, w: w}, nil
}
func (f *fileStreamIn) Name() string {
return f.name
}
func (f *fileStreamIn) Write(b []byte) (int, error) {
return f.w.Write(b)
}
func (f *fileStreamIn) EnvFile(fs filestore.FileStore) (interface{}, error) {
return f.r, nil
}
func (f *fileStreamIn) String() string {
return fmt.Sprintf("fileStreamIn:%s", f.name)
}
func (f *fileStreamIn) Close() error {
f.r.Close()
return f.w.Close()
}
type fileStreamOut struct {
name string
r, w *os.File
}
func newFileStreamOut(name string) (*fileStreamOut, error) {
r, w, err := os.Pipe()
if err != nil {
return nil, err
}
return &fileStreamOut{name: name, r: r, w: w}, nil
}
func (f *fileStreamOut) Name() string {
return f.name
}
func (f *fileStreamOut) Read(b []byte) (int, error) {
return f.r.Read(b)
}
func (f *fileStreamOut) EnvFile(fs filestore.FileStore) (interface{}, error) {
return f.w, nil
}
func (f *fileStreamOut) String() string {
return fmt.Sprintf("fileStreamOut:%s", f.name)
}
func (f *fileStreamOut) Close() error {
f.w.Close()
return f.r.Close()
}


@ -1,167 +0,0 @@
package main
import (
"fmt"
"io"
"github.com/creack/pty"
"github.com/criyle/go-judge/pb"
)
func (e *execServer) ExecStream(es pb.Executor_ExecStreamServer) error {
msg, err := es.Recv()
if err != nil {
return err
}
req := msg.GetExecRequest()
if req == nil {
return fmt.Errorf("The first stream request must be exec request")
}
rq, streamIn, streamOut, err := convertPBRequest(req, e.srcPrefix)
if err != nil {
return err
}
defer func() {
for _, fi := range streamIn {
fi.Close()
}
for _, fi := range streamOut {
fi.Close()
}
}()
errCh := make(chan error, 1)
// stream in
if len(streamIn) > 0 {
go func() {
err := streamInput(es, streamIn)
if err != nil {
writeErrCh(errCh, err)
}
}()
}
// stream out
outCh := make(chan *pb.StreamResponse_ExecOutput, len(streamOut))
if len(streamOut) > 0 {
for _, so := range streamOut {
so := so
go func() {
err := streamOutput(es.Context().Done(), outCh, so)
if err != nil {
writeErrCh(errCh, err)
}
}()
}
}
rtCh := e.worker.Execute(es.Context(), rq)
for {
select {
case err := <-errCh:
return err
case o := <-outCh:
err = es.Send(&pb.StreamResponse{
Response: o,
})
if err != nil {
return err
}
buffPool.Put(o.ExecOutput.Content[:cap(o.ExecOutput.Content)])
case rt := <-rtCh:
execObserve(rt)
if rt.Error != nil {
return err
}
return es.Send(&pb.StreamResponse{
Response: &pb.StreamResponse_ExecResponse{
ExecResponse: convertPBResponse(rt),
},
})
}
}
}
func streamOutput(done <-chan struct{}, outCh chan<- *pb.StreamResponse_ExecOutput, so *fileStreamOut) error {
for {
select {
case <-done:
return nil
default:
}
buf := buffPool.Get().([]byte)
n, err := so.Read(buf)
if err != nil {
return nil
}
outCh <- &pb.StreamResponse_ExecOutput{
ExecOutput: &pb.StreamResponse_Output{
Name: so.Name(),
Content: buf[:n],
},
}
}
}
func streamInput(es pb.Executor_ExecStreamServer, streamIn []*fileStreamIn) error {
inf := make(map[string]*fileStreamIn)
for _, f := range streamIn {
inf[f.Name()] = f
}
for {
select {
case <-es.Context().Done():
return nil
default:
}
in, err := es.Recv()
if err == io.EOF {
return nil
}
if err != nil {
return err
}
switch i := in.Request.(type) {
case *pb.StreamRequest_ExecInput:
f, ok := inf[i.ExecInput.GetName()]
if !ok {
return fmt.Errorf("input %s not exists", i.ExecInput.GetName())
}
_, err = f.Write(i.ExecInput.Content)
if err != nil {
return fmt.Errorf("write to input %s with err %w", i.ExecInput.GetName(), err)
}
case *pb.StreamRequest_ExecResize:
f, ok := inf[i.ExecResize.GetName()]
if !ok {
return fmt.Errorf("input %s not exists", i.ExecResize.GetName())
}
winSize := &pty.Winsize{
Rows: uint16(i.ExecResize.Rows),
Cols: uint16(i.ExecResize.Cols),
X: uint16(i.ExecResize.X),
Y: uint16(i.ExecResize.Y),
}
err = pty.Setsize(f.w, winSize)
if err != nil {
return fmt.Errorf("resize to input %s with err %w", i.ExecResize.GetName(), err)
}
default:
return fmt.Errorf("the following request must be input request")
}
}
}
func writeErrCh(ch chan error, err error) {
select {
case ch <- err:
default:
}
}
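// Editor's sketch (illustrative only, not part of the original file): writeErrCh
// reports only the first error, because the channel is buffered with capacity 1
// and later sends fall through to the default branch. The function below is a
// hypothetical demonstration of that behavior.
func exampleFirstErrorWins() error {
errCh := make(chan error, 1)
writeErrCh(errCh, fmt.Errorf("first failure"))
writeErrCh(errCh, fmt.Errorf("second failure")) // dropped: buffer already holds an error
return <-errCh // yields "first failure"
}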

View File

@ -1,293 +0,0 @@
// Command executorserver starts an HTTP server that receives commands to run
// programs inside a sandbox.
package main
import (
"context"
"flag"
"log"
"net"
"net/http"
"os"
"os/signal"
"strings"
"time"
"github.com/criyle/go-judge/cmd/executorserver/config"
"github.com/criyle/go-judge/env"
"github.com/criyle/go-judge/filestore"
"github.com/criyle/go-judge/pb"
"github.com/criyle/go-judge/pkg/envexec"
"github.com/criyle/go-judge/pkg/pool"
"github.com/criyle/go-judge/worker"
ginpprof "github.com/gin-contrib/pprof"
ginzap "github.com/gin-contrib/zap"
"github.com/gin-gonic/gin"
grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
grpc_auth "github.com/grpc-ecosystem/go-grpc-middleware/auth"
grpc_zap "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap"
grpc_recovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery"
grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
ginprometheus "github.com/zsais/go-gin-prometheus"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
var logger *zap.Logger
func main() {
var conf config.Config
if err := conf.Load(); err != nil {
if err == flag.ErrHelp {
return
}
log.Fatalln("load config failed", err)
}
if !conf.Silent {
var err error
if conf.Release {
logger, err = zap.NewProduction()
} else {
config := zap.NewDevelopmentConfig()
config.EncoderConfig.EncodeLevel = zapcore.CapitalColorLevelEncoder
logger, err = config.Build()
}
if err != nil {
log.Fatalln("init logger failed", err)
}
defer logger.Sync()
} else {
logger = zap.NewNop()
}
logger.Sugar().Infof("config loaded: %+v", conf)
// Init environment pool
fs := newFilsStore(conf.Dir)
b, err := env.NewBuilder(env.Config{
ContainerInitPath: conf.ContainerInitPath,
MountConf: conf.MountConf,
TmpFsParam: conf.TmpFsParam,
NetShare: conf.NetShare,
CgroupPrefix: conf.CgroupPrefix,
Cpuset: conf.Cpuset,
ContainerCredStart: conf.ContainerCredStart,
EnableCPURate: conf.EnableCPURate,
CPUCfsPeriod: conf.CPUCfsPeriod,
Logger: logger.Sugar(),
})
if err != nil {
log.Fatalln("create environment builder failed", err)
}
envPool := pool.NewPool(b)
if conf.PreFork > 0 {
logger.Sugar().Info("create ", conf.PreFork, " prefork containers")
m := make([]envexec.Environment, 0, conf.PreFork)
for i := 0; i < conf.PreFork; i++ {
e, err := envPool.Get()
if err != nil {
log.Fatalln("prefork environment failed", err)
}
m = append(m, e)
}
for _, e := range m {
envPool.Put(e)
}
}
work := worker.New(worker.Config{
FileStore: fs,
EnvironmentPool: envPool,
Parallelism: conf.Parallelism,
WorkDir: conf.Dir,
TimeLimitTickInterval: conf.TimeLimitCheckerInterval,
ExtraMemoryLimit: *conf.ExtraMemoryLimit,
OutputLimit: *conf.OutputLimit,
})
work.Start()
logger.Sugar().Infof("Starting worker with parallelism=%d, workdir=%s, timeLimitCheckInterval=%v",
conf.Parallelism, conf.Dir, conf.TimeLimitCheckerInterval)
var r *gin.Engine
if conf.Release {
gin.SetMode(gin.ReleaseMode)
}
r = gin.New()
if conf.Silent {
r.Use(gin.Recovery())
} else {
r.Use(ginzap.Ginzap(logger, time.RFC3339, true))
r.Use(ginzap.RecoveryWithZap(logger, true))
}
// Metrics Handle
if conf.EnableMetrics {
p := ginprometheus.NewPrometheus("gin")
p.ReqCntURLLabelMappingFn = func(c *gin.Context) string {
url := c.Request.URL.Path
for _, p := range c.Params {
if p.Key == "fid" {
url = strings.Replace(url, p.Value, ":fid", 1)
}
}
return url
}
p.Use(r)
}
// Version handle
r.GET("/version", handleVersion)
// Add auth token
if conf.AuthToken != "" {
r.Use(tokenAuth(conf.AuthToken))
logger.Sugar().Info("Attach token auth with token:", conf.AuthToken)
}
// File Handles
fh := &fileHandle{fs: fs}
r.GET("/file", fh.fileGet)
r.POST("/file", fh.filePost)
r.GET("/file/:fid", fh.fileIDGet)
r.DELETE("/file/:fid", fh.fileIDDelete)
// Run Handle
rh := &cmdHandle{worker: work, srcPrefix: conf.SrcPrefix}
r.POST("/run", rh.handleRun)
// WebSocket Handle
wh := &wsHandle{worker: work, srcPrefix: conf.SrcPrefix}
r.GET("/ws", wh.handleWS)
// pprof
if conf.EnableDebug {
ginpprof.Register(r)
}
// gRPC server
var grpcServer *grpc.Server
if conf.EnableGRPC {
grpc_zap.ReplaceGrpcLoggerV2(logger)
streamMiddleware := []grpc.StreamServerInterceptor{
grpc_prometheus.StreamServerInterceptor,
grpc_zap.StreamServerInterceptor(logger),
grpc_recovery.StreamServerInterceptor(),
}
unaryMiddleware := []grpc.UnaryServerInterceptor{
grpc_prometheus.UnaryServerInterceptor,
grpc_zap.UnaryServerInterceptor(logger),
grpc_recovery.UnaryServerInterceptor(),
}
if conf.AuthToken != "" {
authFunc := grpcTokenAuth(conf.AuthToken)
streamMiddleware = append(streamMiddleware, grpc_auth.StreamServerInterceptor(authFunc))
unaryMiddleware = append(unaryMiddleware, grpc_auth.UnaryServerInterceptor(authFunc))
}
grpcServer = grpc.NewServer(
grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(streamMiddleware...)),
grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(unaryMiddleware...)),
)
pb.RegisterExecutorServer(grpcServer, &execServer{
fs: fs,
worker: work,
srcPrefix: conf.SrcPrefix,
})
grpc_prometheus.Register(grpcServer)
grpc_prometheus.EnableHandlingTimeHistogram()
lis, err := net.Listen("tcp", conf.GRPCAddr)
if err != nil {
log.Fatalln(err)
}
go func() {
logger.Sugar().Info("Starting gRPC server at ", conf.GRPCAddr)
logger.Sugar().Info("gRPC serve finished: ", grpcServer.Serve(lis))
}()
}
srv := http.Server{
Addr: conf.HTTPAddr,
Handler: r,
}
go func() {
logger.Sugar().Info("Starting http server at ", conf.HTTPAddr)
logger.Sugar().Info("Http serve finished: ", srv.ListenAndServe())
}()
// Graceful shutdown...
sig := make(chan os.Signal, 1)
signal.Notify(sig, os.Interrupt)
<-sig
logger.Sugar().Info("Shutting Down...")
ctx, cancel := context.WithTimeout(context.TODO(), time.Second*3)
defer cancel()
var eg errgroup.Group
eg.Go(func() error {
logger.Sugar().Info("Http server shutdown")
return srv.Shutdown(ctx)
})
eg.Go(func() error {
work.Shutdown()
logger.Sugar().Info("Worker shutdown")
return nil
})
if grpcServer != nil {
eg.Go(func() error {
grpcServer.GracefulStop()
logger.Sugar().Info("GRPC server shutdown")
return nil
})
}
go func() {
logger.Sugar().Info("Shutdown Finished: ", eg.Wait())
cancel()
}()
<-ctx.Done()
}
func tokenAuth(token string) gin.HandlerFunc {
const bearer = "Bearer "
return func(c *gin.Context) {
reqToken := c.GetHeader("Authorization")
if strings.HasPrefix(reqToken, bearer) && reqToken[len(bearer):] == token {
c.Next()
return
}
c.AbortWithStatus(http.StatusUnauthorized)
}
}
func grpcTokenAuth(token string) func(context.Context) (context.Context, error) {
return func(ctx context.Context) (context.Context, error) {
reqToken, err := grpc_auth.AuthFromMD(ctx, "bearer")
if err != nil {
return nil, err
}
if reqToken != token {
return nil, status.Errorf(codes.Unauthenticated, "invalid auth token: %v", err)
}
return ctx, nil
}
}
func newFilsStore(dir string) filestore.FileStore {
var fs filestore.FileStore
if dir == "" {
fs = filestore.NewFileMemoryStore()
} else {
os.MkdirAll(dir, 0755)
fs = filestore.NewFileLocalStore(dir)
}
return fs
}

View File

@ -1,81 +0,0 @@
package main
import (
"time"
"github.com/criyle/go-judge/worker"
"github.com/prometheus/client_golang/prometheus"
)
const (
metricsNamespace = "executorserver"
)
var (
// 1ms -> 10s
timeBuckets = []float64{
0.001, 0.002, 0.005, 0.008, 0.010, 0.025, 0.050, 0.075, 0.1, 0.2,
0.4, 0.6, 0.8, 1.0, 1.5, 2, 5, 10,
}
// 4k (1<<12) -> 1g (1<<30)
memoryBucket = prometheus.ExponentialBuckets(1<<12, 2, 19)
metricsSummaryQuantile = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}
execErrorCount = prometheus.NewCounter(prometheus.CounterOpts{
Namespace: metricsNamespace,
Name: "error",
Help: "Number of exec query returns error",
})
execTimeHist = prometheus.NewHistogramVec(prometheus.HistogramOpts{
Namespace: metricsNamespace,
Name: "time_seconds",
Help: "Histogram for the running time",
Buckets: timeBuckets,
}, []string{"status"})
execTimeSummary = prometheus.NewSummaryVec(prometheus.SummaryOpts{
Namespace: metricsNamespace,
Name: "time",
Help: "Summary for the running time",
Objectives: metricsSummaryQuantile,
}, []string{"status"})
execMemHist = prometheus.NewHistogramVec(prometheus.HistogramOpts{
Namespace: metricsNamespace,
Name: "memory_bytes",
Help: "Histgram for the memory",
Buckets: memoryBucket,
}, []string{"status"})
execMemSummary = prometheus.NewSummaryVec(prometheus.SummaryOpts{
Namespace: metricsNamespace,
Name: "memory",
Help: "Summary for the memory",
Objectives: metricsSummaryQuantile,
}, []string{"status"})
)
func init() {
prometheus.MustRegister(execErrorCount)
prometheus.MustRegister(execTimeHist, execTimeSummary)
prometheus.MustRegister(execMemHist, execMemSummary)
}
func execObserve(res worker.Response) {
if res.Error != nil {
execErrorCount.Inc()
}
for _, r := range res.Results {
status := r.Status.String()
d := time.Duration(r.Time)
ob := d.Seconds()
mob := float64(r.Memory)
execTimeHist.WithLabelValues(status).Observe(ob)
execTimeSummary.WithLabelValues(status).Observe(ob)
execMemHist.WithLabelValues(status).Observe(mob)
execMemSummary.WithLabelValues(status).Observe(mob)
}
}

View File

@ -1,229 +0,0 @@
package model
import (
"fmt"
"os"
"path/filepath"
"strings"
"github.com/criyle/go-judge/pkg/envexec"
"github.com/criyle/go-judge/worker"
)
// CmdFile defines a file from one of multiple sources: local, memory, cached, or pipe collector
type CmdFile struct {
Src *string `json:"src"`
Content *string `json:"content"`
FileID *string `json:"fileId"`
Name *string `json:"name"`
Max *int64 `json:"max"`
}
// Cmd defines the command and limits used to start a program in envexec
type Cmd struct {
Args []string `json:"args"`
Env []string `json:"env,omitempty"`
Files []*CmdFile `json:"files,omitempty"`
TTY bool `json:"tty,omitempty"`
CPULimit uint64 `json:"cpuLimit"`
RealCPULimit uint64 `json:"realCpuLimit"`
MemoryLimit uint64 `json:"memoryLimit"`
StackLimit uint64 `json:"stackLimit"`
ProcLimit uint64 `json:"procLimit"`
CPURateLimit float64 `json:"cpuRateLimit"`
CopyIn map[string]CmdFile `json:"copyIn"`
CopyOut []string `json:"copyOut"`
CopyOutCached []string `json:"copyOutCached"`
CopyOutMax uint64 `json:"copyOutMax"`
CopyOutDir string `json:"copyOutDir"`
}
// PipeIndex defines indexing for a pipe fd
type PipeIndex struct {
Index int `json:"index"`
Fd int `json:"fd"`
}
// PipeMap defines the in / out pipe mapping between programs
type PipeMap struct {
In PipeIndex `json:"in"`
Out PipeIndex `json:"out"`
}
// Request defines single worker request
type Request struct {
RequestID string `json:"requestId"`
Cmd []Cmd `json:"cmd"`
PipeMapping []PipeMap `json:"pipeMapping"`
}
// Status provides JSON marshaling for envexec.Status
type Status envexec.Status
// MarshalJSON converts the status into a JSON string
func (s Status) MarshalJSON() ([]byte, error) {
return []byte("\"" + (envexec.Status)(s).String() + "\""), nil
}
// Result defines single command result
type Result struct {
Status Status `json:"status"`
ExitStatus int `json:"exitStatus"`
Error string `json:"error,omitempty"`
Time uint64 `json:"time"`
Memory uint64 `json:"memory"`
RunTime uint64 `json:"runTime"`
Files map[string]string `json:"files,omitempty"`
FileIDs map[string]string `json:"fileIds,omitempty"`
}
// Response defines worker response for single request
type Response struct {
RequestID string `json:"requestId"`
Results []Result `json:"results"`
ErrorMsg string `json:"error,omitempty"`
}
// ConvertResponse converts a worker response into the JSON response model
func ConvertResponse(r worker.Response) Response {
ret := Response{
RequestID: r.RequestID,
Results: make([]Result, 0, len(r.Results)),
}
for _, r := range r.Results {
ret.Results = append(ret.Results, convertResult(r))
}
if r.Error != nil {
ret.ErrorMsg = r.Error.Error()
}
return ret
}
// ConvertRequest converts a JSON request into a worker request
func ConvertRequest(r *Request, srcPrefix string) (*worker.Request, error) {
req := &worker.Request{
RequestID: r.RequestID,
Cmd: make([]worker.Cmd, 0, len(r.Cmd)),
PipeMapping: make([]worker.PipeMap, 0, len(r.PipeMapping)),
}
for _, c := range r.Cmd {
wc, err := convertCmd(c, srcPrefix)
if err != nil {
return nil, err
}
req.Cmd = append(req.Cmd, wc)
}
for _, p := range r.PipeMapping {
req.PipeMapping = append(req.PipeMapping, convertPipe(p))
}
return req, nil
}
func convertResult(r worker.Result) Result {
res := Result{
Status: Status(r.Status),
ExitStatus: r.ExitStatus,
Error: r.Error,
Time: r.Time,
RunTime: r.RunTime,
Memory: r.Memory,
FileIDs: r.FileIDs,
}
if r.Files != nil {
res.Files = make(map[string]string)
for k, v := range r.Files {
res.Files[k] = string(v)
}
}
return res
}
func convertPipe(p PipeMap) worker.PipeMap {
return worker.PipeMap{
In: worker.PipeIndex{
Index: p.In.Index,
Fd: p.In.Fd,
},
Out: worker.PipeIndex{
Index: p.Out.Index,
Fd: p.Out.Fd,
},
}
}
func convertCmd(c Cmd, srcPrefix string) (worker.Cmd, error) {
w := worker.Cmd{
Args: c.Args,
Env: c.Env,
Files: make([]worker.CmdFile, 0, len(c.Files)),
TTY: c.TTY,
CPULimit: c.CPULimit,
RealCPULimit: c.RealCPULimit,
MemoryLimit: c.MemoryLimit,
StackLimit: c.StackLimit,
ProcLimit: c.ProcLimit,
CPURateLimit: c.CPURateLimit,
CopyOut: c.CopyOut,
CopyOutCached: c.CopyOutCached,
CopyOutMax: c.CopyOutMax,
CopyOutDir: c.CopyOutDir,
}
for _, f := range c.Files {
cf, err := convertCmdFile(f, srcPrefix)
if err != nil {
return w, err
}
w.Files = append(w.Files, cf)
}
if c.CopyIn != nil {
w.CopyIn = make(map[string]worker.CmdFile)
for k, f := range c.CopyIn {
cf, err := convertCmdFile(&f, srcPrefix)
if err != nil {
return w, err
}
w.CopyIn[k] = cf
}
}
return w, nil
}
func convertCmdFile(f *CmdFile, srcPrefix string) (worker.CmdFile, error) {
switch {
case f == nil:
return nil, nil
case f.Src != nil:
if srcPrefix != "" {
ok, err := checkPathPrefix(*f.Src, srcPrefix)
if err != nil {
return nil, err
}
if !ok {
return nil, fmt.Errorf("file (%s) does not under (%s)", *f.Src, srcPrefix)
}
}
return &worker.LocalFile{Src: *f.Src}, nil
case f.Content != nil:
return &worker.MemoryFile{Content: []byte(*f.Content)}, nil
case f.FileID != nil:
return &worker.CachedFile{FileID: *f.FileID}, nil
case f.Max != nil && f.Name != nil:
return &worker.PipeCollector{Name: *f.Name, Max: *f.Max}, nil
default:
return nil, fmt.Errorf("file is not valid for cmd")
}
}
func checkPathPrefix(path, prefix string) (bool, error) {
if filepath.IsAbs(path) {
return strings.HasPrefix(filepath.Clean(path), prefix), nil
}
wd, err := os.Getwd()
if err != nil {
return false, err
}
return strings.HasPrefix(filepath.Join(wd, path), prefix), nil
}
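// Editor's sketch (illustrative only, not part of the original file): build a
// minimal single-command request with the types above and convert it into a
// worker request. The command, limits, and collector name are made-up values,
// and the CPU limit is assumed to be in nanoseconds.
func exampleConvertRequest() (*worker.Request, error) {
stdin := ""            // empty stdin provided as a memory file
stdoutName := "stdout" // pipe collector name
maxOut := int64(4096)  // collect at most 4 KiB of output
r := &Request{
RequestID: "example",
Cmd: []Cmd{{
Args:        []string{"/bin/echo", "hello"},
Env:         []string{"PATH=/usr/bin:/bin"},
Files:       []*CmdFile{{Content: &stdin}, {Name: &stdoutName, Max: &maxOut}},
CPULimit:    1000000000, // assumed nanoseconds (1 s)
MemoryLimit: 64 << 20,   // 64 MiB
ProcLimit:   1,
}},
}
return ConvertRequest(r, "")
}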

View File

@ -1,5 +0,0 @@
// Code generated by go generate; DO NOT EDIT.
package main
const Version = `v0.8.4-2-gb1e9e0d`

View File

@ -1,91 +0,0 @@
package main
import (
"context"
"net/http"
"time"
"github.com/criyle/go-judge/cmd/executorserver/model"
"github.com/criyle/go-judge/worker"
"github.com/gin-gonic/gin"
"github.com/gorilla/websocket"
)
var upgrader = websocket.Upgrader{
CheckOrigin: func(r *http.Request) bool {
return true
},
}
const (
writeWait = 10 * time.Second
pongWait = 60 * time.Second
pingPeriod = 50 * time.Second
)
type wsHandle struct {
worker worker.Worker
srcPrefix string
}
func (h *wsHandle) handleWS(c *gin.Context) {
conn, err := upgrader.Upgrade(c.Writer, c.Request, nil)
if err != nil {
c.Error(err)
c.AbortWithStatusJSON(http.StatusBadRequest, err.Error())
return
}
resultCh := make(chan model.Response, 128)
// read request
go func() {
defer conn.Close()
conn.SetReadDeadline(time.Now().Add(pongWait))
conn.SetPongHandler(func(string) error {
conn.SetReadDeadline(time.Now().Add(pongWait))
return nil
})
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
for {
req := new(model.Request)
if err := conn.ReadJSON(req); err != nil {
logger.Sugar().Warn("ws read error:", err)
return
}
r, err := model.ConvertRequest(req, h.srcPrefix)
if err != nil {
logger.Sugar().Warn("convert error: ", err)
return
}
go func() {
ret := <-h.worker.Submit(ctx, r)
execObserve(ret)
resultCh <- model.ConvertResponse(ret)
}()
}
}()
// write result
go func() {
defer conn.Close()
ticker := time.NewTicker(pingPeriod)
defer ticker.Stop()
for {
select {
case r := <-resultCh:
conn.SetWriteDeadline(time.Now().Add(writeWait))
if err := conn.WriteJSON(r); err != nil {
logger.Sugar().Warn("ws write error:", err)
return
}
case <-ticker.C:
conn.SetWriteDeadline(time.Now().Add(writeWait))
if err := conn.WriteMessage(websocket.PingMessage, nil); err != nil {
return
}
}
}
}()
}

View File

@ -1,239 +0,0 @@
package main
import (
"context"
"flag"
"fmt"
"io"
"log"
"os"
"os/signal"
"syscall"
"time"
"github.com/creack/pty"
"github.com/criyle/go-judge/pb"
"golang.org/x/crypto/ssh/terminal"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
)
var (
srvAddr = flag.String("srvaddr", "localhost:5051", "GRPC server addr")
)
const (
cpuLimit = 20 * time.Second
sessionLimit = 30 * time.Minute
procLimit = 50
memoryLimit = 256 << 20 // 256m
pathEnv = "PATH=/usr/local/bin:/usr/bin:/bin"
)
var env = []string{
pathEnv,
"HOME=/tmp",
"TERM=" + os.Getenv("TERM"),
}
func main() {
flag.Parse()
args := flag.Args()
if len(args) == 0 {
args = []string{"/bin/bash"}
}
token := os.Getenv("TOKEN")
opts := []grpc.DialOption{grpc.WithInsecure()}
if token != "" {
opts = append(opts, grpc.WithPerRPCCredentials(newTokenAuth(token)))
}
conn, err := grpc.Dial(*srvAddr, opts...)
if err != nil {
log.Fatalln("client", err)
}
client := pb.NewExecutorClient(conn)
sc, err := client.ExecStream(context.TODO())
if err != nil {
log.Fatalln("ExecStream", err)
}
log.Println("Starts", args)
r, err := run(sc, args)
log.Println("ExecStream Finished", r, err)
}
func run(sc pb.Executor_ExecStreamClient, args []string) (*pb.Response, error) {
req := &pb.Request{
Cmd: []*pb.Request_CmdType{{
Args: args,
Env: env,
Files: []*pb.Request_File{
{
File: &pb.Request_File_StreamIn{
StreamIn: &pb.Request_StreamInput{
Name: "stdin",
},
},
},
{
File: &pb.Request_File_StreamOut{
StreamOut: &pb.Request_StreamOutput{
Name: "stdout",
},
},
},
{
File: &pb.Request_File_StreamOut{
StreamOut: &pb.Request_StreamOutput{
Name: "stderr",
},
},
},
},
CPULimit: uint64(cpuLimit),
RealCPULimit: uint64(sessionLimit),
MemoryLimit: memoryLimit,
ProcLimit: procLimit,
Tty: true,
}},
}
err := sc.Send(&pb.StreamRequest{
Request: &pb.StreamRequest_ExecRequest{
ExecRequest: req,
},
})
if err != nil {
return nil, fmt.Errorf("ExecStream Send request %v", err)
}
// Set stdin in raw mode.
oldState, err := terminal.MakeRaw(int(os.Stdin.Fd()))
if err != nil {
panic(err)
}
defer func() { _ = terminal.Restore(int(os.Stdin.Fd()), oldState) }() // Best effort.
// pump msg
sendCh := make(chan *pb.StreamRequest, 64)
defer close(sendCh)
go func() {
for r := range sendCh {
err := sc.Send(r)
if err != nil {
log.Println("input", err)
return
}
}
}()
// pump stdin
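// On local stdin EOF, a ^D (EOT, 0x04) byte is forwarded so the remote TTY sees end-of-input; ^C is forwarded separately by the signal handler below.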
go func() {
buf := make([]byte, 4096)
for {
n, err := os.Stdin.Read(buf)
if err == io.EOF {
sendCh <- &pb.StreamRequest{
Request: &pb.StreamRequest_ExecInput{
ExecInput: &pb.StreamRequest_Input{
Name: "stdin",
Content: []byte("\004"),
},
},
}
continue
}
if err != nil {
log.Println("stdin", err)
return
}
sendCh <- &pb.StreamRequest{
Request: &pb.StreamRequest_ExecInput{
ExecInput: &pb.StreamRequest_Input{
Name: "stdin",
Content: buf[:n],
},
},
}
}
}()
sigCh := make(chan os.Signal, 1)
signal.Notify(sigCh, syscall.SIGINT)
// pump ^C
go func() {
for range sigCh {
sendCh <- &pb.StreamRequest{
Request: &pb.StreamRequest_ExecInput{
ExecInput: &pb.StreamRequest_Input{
Name: "stdin",
Content: []byte("\003"),
},
},
}
}
}()
// pump resize
ch := make(chan os.Signal, 1)
signal.Notify(ch, syscall.SIGWINCH)
go func() {
for range ch {
winSize, err := pty.GetsizeFull(os.Stdin)
if err != nil {
log.Println("get win size", err)
return
}
sendCh <- &pb.StreamRequest{
Request: &pb.StreamRequest_ExecResize{
ExecResize: &pb.StreamRequest_Resize{
Name: "stdin",
Rows: uint32(winSize.Rows),
Cols: uint32(winSize.Cols),
X: uint32(winSize.X),
Y: uint32(winSize.Y),
},
},
}
}
}()
ch <- syscall.SIGWINCH // Initial resize.
// pump stdout
for {
sr, err := sc.Recv()
if err != nil {
return nil, fmt.Errorf("ExecStream recv %v", err)
}
switch sr := sr.Response.(type) {
case *pb.StreamResponse_ExecOutput:
switch sr.ExecOutput.Name {
case "stdout":
os.Stdout.Write(sr.ExecOutput.Content)
case "stderr":
os.Stderr.Write(sr.ExecOutput.Content)
}
case *pb.StreamResponse_ExecResponse:
return sr.ExecResponse, nil
}
}
}
type tokenAuth struct {
token string
}
func newTokenAuth(token string) credentials.PerRPCCredentials {
return &tokenAuth{token: token}
}
// Return value is mapped to request headers.
func (t *tokenAuth) GetRequestMetadata(ctx context.Context, in ...string) (map[string]string, error) {
return map[string]string{
"authorization": "Bearer " + t.token,
}, nil
}
func (*tokenAuth) RequireTransportSecurity() bool {
return false
}

View File

@ -1,211 +0,0 @@
package main
import "C"
import (
"bytes"
"context"
"encoding/json"
"log"
"os"
"time"
"github.com/criyle/go-judge/cmd/executorserver/model"
"github.com/criyle/go-judge/env"
"github.com/criyle/go-judge/filestore"
"github.com/criyle/go-judge/pkg/pool"
"github.com/criyle/go-judge/worker"
)
type initParameter struct {
CInitPath string `json:"cinitPath"`
Parallelism int `json:"parallelism"`
TmpFsParam string `json:"tmpfsParam"`
Dir string `json:"dir"`
NetShare bool `json:"netShare"`
MountConf string `json:"mountConf"`
SrcPrefix string `json:"srcPrefix"`
CgroupPrefix string `json:"cgroupPrefix"`
CPUSet string `json:"cpuset"`
CredStart int `json:"credStart"`
}
var (
fs filestore.FileStore
work worker.Worker
srcPrefix string
)
func newFilsStore(dir string) filestore.FileStore {
var fs filestore.FileStore
if dir == "" {
fs = filestore.NewFileMemoryStore()
} else {
os.MkdirAll(dir, 0755)
fs = filestore.NewFileLocalStore(dir)
}
return fs
}
// Init initializes the sandbox environment
//export Init
func Init(i *C.char) C.int {
is := C.GoString(i)
var ip initParameter
if err := json.NewDecoder(bytes.NewBufferString(is)).Decode(&ip); err != nil {
return -1
}
if ip.Parallelism == 0 {
ip.Parallelism = 4
}
if ip.TmpFsParam == "" {
ip.TmpFsParam = "size=16m,nr_inodes=4k"
}
if ip.MountConf == "" {
ip.MountConf = "mount.yaml"
}
srcPrefix = ip.SrcPrefix
fs = newFilsStore(ip.Dir)
b, err := env.NewBuilder(env.Config{
ContainerInitPath: ip.CInitPath,
MountConf: ip.MountConf,
TmpFsParam: ip.TmpFsParam,
NetShare: ip.NetShare,
CgroupPrefix: ip.CgroupPrefix,
Cpuset: ip.CPUSet,
ContainerCredStart: ip.CredStart,
Logger: nopLogger{},
})
if err != nil {
log.Fatalln("create environment builder failed", err)
}
envPool := pool.NewPool(b)
work = worker.New(worker.Config{
FileStore: fs,
EnvironmentPool: envPool,
Parallelism: ip.Parallelism,
WorkDir: ip.Dir,
TimeLimitTickInterval: 100 * time.Millisecond,
})
work.Start()
return 0
}
// Exec runs a command inside the container runner
//export Exec
func Exec(e *C.char) *C.char {
es := C.GoString(e)
var req model.Request
if err := json.NewDecoder(bytes.NewBufferString(es)).Decode(&req); err != nil {
return nil
}
r, err := model.ConvertRequest(&req, srcPrefix)
if err != nil {
return nil
}
rt := <-work.Submit(context.TODO(), r)
ret := model.ConvertResponse(rt)
var buf bytes.Buffer
if err := json.NewEncoder(&buf).Encode(ret); err != nil {
return nil
}
return C.CString(buf.String())
}
// FileList gets the list of files in the file store
//export FileList
func FileList() *C.char {
ids := fs.List()
var buf bytes.Buffer
if err := json.NewEncoder(&buf).Encode(ids); err != nil {
return nil
}
return C.CString(buf.String())
}
// FileAdd adds a file to the file store
//export FileAdd
func FileAdd(e *C.char) *C.char {
type fileAdd struct {
Name string `json:"name"`
Content string `json:"content"`
}
es := C.GoString(e)
var f fileAdd
if err := json.NewDecoder(bytes.NewBufferString(es)).Decode(&f); err != nil {
return nil
}
id, err := fs.Add(f.Name, []byte(f.Content))
if err != nil {
return nil
}
return C.CString(id)
}
// FileGet gets a file from the file store by id
//export FileGet
func FileGet(e *C.char) *C.char {
type fileGet struct {
ID string `json:"id"`
}
es := C.GoString(e)
var f fileGet
if err := json.NewDecoder(bytes.NewBufferString(es)).Decode(&f); err != nil {
return nil
}
file := fs.Get(f.ID)
if file == nil {
return nil
}
c, err := file.Content()
if err != nil {
return nil
}
return C.CString(string(c))
}
// FileDelete deletes a file from the file store by id
//export FileDelete
func FileDelete(e *C.char) *C.char {
type fileDelete struct {
ID string `json:"id"`
}
es := C.GoString(e)
var f fileDelete
if err := json.NewDecoder(bytes.NewBufferString(es)).Decode(&f); err != nil {
return nil
}
ok := fs.Remove(f.ID)
if !ok {
return nil
}
return C.CString("")
}
type nopLogger struct {
}
func (nopLogger) Debug(args ...interface{}) {
}
func (nopLogger) Info(args ...interface{}) {
}
func (nopLogger) Warn(args ...interface{}) {
}
func (nopLogger) Error(args ...interface{}) {
}

View File

@ -0,0 +1,241 @@
package main
import "C"
import (
"bytes"
"context"
"encoding/json"
"io"
"log"
"os"
"runtime"
"strings"
"time"
"unsafe"
"github.com/criyle/go-judge/cmd/go-judge/model"
"github.com/criyle/go-judge/env"
"github.com/criyle/go-judge/env/pool"
"github.com/criyle/go-judge/envexec"
"github.com/criyle/go-judge/filestore"
"github.com/criyle/go-judge/worker"
"go.uber.org/zap"
)
type initParameter struct {
CInitPath string `json:"cinitPath"`
Parallelism int `json:"parallelism"`
TmpFsParam string `json:"tmpfsParam"`
Dir string `json:"dir"`
NetShare bool `json:"netShare"`
MountConf string `json:"mountConf"`
SrcPrefix string `json:"srcPrefix"`
CgroupPrefix string `json:"cgroupPrefix"`
CPUSet string `json:"cpuset"`
CredStart int `json:"credStart"`
EnableCPURate bool `json:"enableCpuRate"`
CPUCfsPeriod time.Duration `json:"cpuCfsPeriod"`
NoFallback bool `json:"noFallback"`
}
var (
fs filestore.FileStore
work worker.Worker
srcPrefix []string
)
func newFileStore(dir string) (filestore.FileStore, error) {
if dir == "" {
if runtime.GOOS == "linux" {
dir = "/dev/shm"
} else {
dir = os.TempDir()
}
dir, _ = os.MkdirTemp(dir, "go-judge")
}
if err := os.MkdirAll(dir, 0o755); err != nil {
return nil, err
}
return filestore.NewFileLocalStore(dir), nil
}
// Init initializes the sandbox environment
//
//export Init
func Init(i *C.char) C.int {
is := C.GoString(i)
var ip initParameter
if err := json.NewDecoder(bytes.NewBufferString(is)).Decode(&ip); err != nil {
return -1
}
if ip.Parallelism <= 0 {
ip.Parallelism = 4
}
if ip.TmpFsParam == "" {
ip.TmpFsParam = "size=16m,nr_inodes=4k"
}
if ip.MountConf == "" {
ip.MountConf = "mount.yaml"
}
if ip.CPUCfsPeriod == 0 {
ip.CPUCfsPeriod = 100 * time.Millisecond
}
srcPrefix = strings.Split(ip.SrcPrefix, ",")
var err error
fs, err = newFileStore(ip.Dir)
if err != nil {
log.Fatalln("file store create failed", err)
}
b, _, err := env.NewBuilder(env.Config{
ContainerInitPath: ip.CInitPath,
MountConf: ip.MountConf,
TmpFsParam: ip.TmpFsParam,
NetShare: ip.NetShare,
CgroupPrefix: ip.CgroupPrefix,
Cpuset: ip.CPUSet,
ContainerCredStart: ip.CredStart,
EnableCPURate: ip.EnableCPURate,
CPUCfsPeriod: ip.CPUCfsPeriod,
NoFallback: ip.NoFallback,
}, zap.NewNop())
if err != nil {
log.Fatalln("create environment builder failed", err)
}
envPool := pool.NewPool(b)
work = worker.New(worker.Config{
FileStore: fs,
EnvironmentPool: envPool,
Parallelism: ip.Parallelism,
WorkDir: ip.Dir,
TimeLimitTickInterval: 100 * time.Millisecond,
})
work.Start()
return 0
}
// Exec runs a command inside the container runner
//
// Remember to free the return char pointer value
//
//export Exec
func Exec(e *C.char) *C.char {
es := C.GoString(e)
var req model.Request
if err := json.NewDecoder(bytes.NewBufferString(es)).Decode(&req); err != nil {
return nil
}
r, err := model.ConvertRequest(&req, srcPrefix)
if err != nil {
return nil
}
rtCh, _ := work.Submit(context.TODO(), r)
rt := <-rtCh
ret, err := model.ConvertResponse(rt, true)
if err != nil {
return nil
}
defer ret.Close()
var buf bytes.Buffer
if err := json.NewEncoder(&buf).Encode(ret); err != nil {
return nil
}
return C.CString(buf.String())
}
// FileList gets the list of files in the file store.
//
// Remember to free the 2-D char arrays `ids` and `names`.
//
//export FileList
func FileList(ids ***C.char, names ***C.char) C.size_t {
res := fs.List()
idsWrap := C.malloc(C.size_t(len(res)) * C.size_t(unsafe.Sizeof(uintptr(0))))
namesWrap := C.malloc(C.size_t(len(res)) * C.size_t(unsafe.Sizeof(uintptr(0))))
pIDsWrap := (*[1<<30 - 1]*C.char)(idsWrap)
pNamesWrap := (*[1<<30 - 1]*C.char)(namesWrap)
idx := 0
for id, name := range res {
pIDsWrap[idx] = C.CString(id)
pNamesWrap[idx] = C.CString(name)
idx++
}
*ids = (**C.char)(idsWrap)
*names = (**C.char)(namesWrap)
return C.size_t(len(res))
}
// FileAdd adds a file to the file store
//
// Remember to free the return char pointer value
//
//export FileAdd
func FileAdd(content *C.char, contentLen C.int, name *C.char) *C.char {
sContent := C.GoBytes(unsafe.Pointer(content), contentLen)
f, err := fs.New()
if err != nil {
return nil
}
defer f.Close()
if _, err := f.Write(sContent); err != nil {
return nil
}
id, err := fs.Add(C.GoString(name), f.Name())
if err != nil {
return nil
}
return C.CString(id)
}
// FileGet gets a file from the file store by id.
// If the return value is a positive number or zero, the value represents the length of the file.
// Otherwise, if the return value is negative, one of the following errors occurred:
//
// - `-1`: The file does not exist.
// - `-2`: go-judge internal error.
//
// Remember to free `out`.
//
//export FileGet
func FileGet(e *C.char, out **C.char) C.int {
es := C.GoString(e)
_, file := fs.Get(es)
if file == nil {
return -1
}
r, err := envexec.FileToReader(file)
if err != nil {
return -2
}
defer r.Close()
c, err := io.ReadAll(r)
if err != nil {
return -2
}
*out = (*C.char)(C.CBytes(c))
return (C.int)(len(c))
}
// FileDelete deletes a file from the file store by id; it returns 0 on failure.
//
//export FileDelete
func FileDelete(e *C.char) C.int {
es := C.GoString(e)
ok := fs.Remove(es)
if !ok {
return 0
}
return 1
}
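// Editor's sketch (illustrative only, not part of the original file): the JSON
// document passed to Init mirrors the initParameter struct above. A hypothetical
// Go-side caller could produce it like this; the directory value is made up.
func exampleInitJSON() (string, error) {
p := initParameter{
Parallelism: 4,
TmpFsParam:  "size=16m,nr_inodes=4k",
MountConf:   "mount.yaml",
Dir:         "/tmp/go-judge",
}
b, err := json.Marshal(&p)
return string(b), err
}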

View File

@ -4,16 +4,18 @@ package main
import (
"context"
"flag"
- "io/ioutil"
+ "io"
"log"
"net/http"
"os"
"github.com/criyle/go-judge/pb"
"github.com/gin-gonic/gin"
- "github.com/golang/protobuf/jsonpb"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/credentials/insecure"
+ "google.golang.org/protobuf/encoding/protojson"
+ "google.golang.org/protobuf/types/known/emptypb"
)
var (
@ -27,7 +29,12 @@ type execProxy struct {
func (p *execProxy) Exec(c *gin.Context) {
req := new(pb.Request)
- if err := jsonpb.Unmarshal(c.Request.Body, req); err != nil {
+ b, err := io.ReadAll(c.Request.Body)
+ if err != nil {
+ c.AbortWithError(http.StatusBadRequest, err)
+ return
+ }
+ if err := protojson.Unmarshal(b, req); err != nil {
c.AbortWithError(http.StatusBadRequest, err)
return
}
@ -41,7 +48,7 @@ func (p *execProxy) Exec(c *gin.Context) {
}
func (p *execProxy) FileList(c *gin.Context) {
- rep, err := p.client.FileList(c, &pb.Empty{})
+ rep, err := p.client.FileList(c, &emptypb.Empty{})
if err != nil {
c.AbortWithError(http.StatusInternalServerError, err)
return
@ -59,9 +66,9 @@ func (p *execProxy) FileGet(c *gin.Context) {
return
}
- fid := &pb.FileID{
+ fid := pb.FileID_builder{
FileID: uri.FileID,
- }
+ }.Build()
rep, err := p.client.FileGet(c, fid)
if err != nil {
c.AbortWithError(http.StatusInternalServerError, err)
@ -82,16 +89,16 @@ func (p *execProxy) FilePost(c *gin.Context) {
c.AbortWithError(http.StatusInternalServerError, err)
return
}
- b, err := ioutil.ReadAll(fi)
+ b, err := io.ReadAll(fi)
if err != nil {
c.AbortWithError(http.StatusInternalServerError, err)
return
}
- req := &pb.FileContent{
+ req := pb.FileContent_builder{
Name: fh.Filename,
Content: b,
- }
+ }.Build()
rep, err := p.client.FileAdd(c, req)
if err != nil {
c.AbortWithError(http.StatusInternalServerError, err)
@ -110,9 +117,9 @@ func (p *execProxy) FileDelete(c *gin.Context) {
return
}
- fid := &pb.FileID{
+ fid := pb.FileID_builder{
FileID: uri.FileID,
- }
+ }.Build()
rep, err := p.client.FileDelete(c, fid)
if err != nil {
c.AbortWithError(http.StatusInternalServerError, err)
@ -124,11 +131,11 @@ func (p *execProxy) FileDelete(c *gin.Context) {
func main() {
flag.Parse()
token := os.Getenv("TOKEN")
- opts := []grpc.DialOption{grpc.WithInsecure()}
+ opts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
if token != "" {
opts = append(opts, grpc.WithPerRPCCredentials(newTokenAuth(token)))
}
- conn, err := grpc.Dial(*srvAddr, opts...)
+ conn, err := grpc.NewClient(*srvAddr, opts...)
if err != nil {
log.Fatalln("client", err)
}

View File

@ -0,0 +1,12 @@
package main
import (
"os"
"github.com/criyle/go-sandbox/container"
)
func main() {
container.Init()
os.Exit(2)
}

275
cmd/go-judge-shell/grpc.go Normal file
View File

@ -0,0 +1,275 @@
package main
import (
"context"
"errors"
"log"
"os"
"strings"
"github.com/criyle/go-judge/cmd/go-judge/model"
"github.com/criyle/go-judge/cmd/go-judge/stream"
"github.com/criyle/go-judge/pb"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/insecure"
"google.golang.org/protobuf/types/known/emptypb"
)
var _ Stream = &grpcWrapper{}
type grpcWrapper struct {
sc pb.Executor_ExecStreamClient
}
func newGrpc(args []string, srvAddr string) Stream {
token := os.Getenv("TOKEN")
opts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
if token != "" {
opts = append(opts, grpc.WithPerRPCCredentials(newTokenAuth(token)))
}
conn, err := grpc.NewClient(srvAddr, opts...)
if err != nil {
log.Fatalln("client", err)
}
client := pb.NewExecutorClient(conn)
sc, err := client.ExecStream(context.TODO())
if err != nil {
log.Fatalln("exec_stream", err)
}
log.Println("start", args)
return &grpcWrapper{sc: sc}
}
func (w *grpcWrapper) Send(req *stream.Request) error {
switch {
case req.Request != nil:
w.sc.Send(convertPBRequest(req.Request))
case req.Input != nil:
w.sc.Send(pb.StreamRequest_builder{ExecInput: pb.StreamRequest_Input_builder{
Index: uint32(req.Input.Index),
Fd: uint32(req.Input.Fd),
Content: req.Input.Content,
}.Build()}.Build())
case req.Resize != nil:
w.sc.Send(pb.StreamRequest_builder{ExecResize: pb.StreamRequest_Resize_builder{
Index: uint32(req.Resize.Index),
Fd: uint32(req.Resize.Fd),
Rows: uint32(req.Resize.Rows),
Cols: uint32(req.Resize.Cols),
X: uint32(req.Resize.X),
Y: uint32(req.Resize.Y),
}.Build()}.Build())
case req.Cancel != nil:
w.sc.Send(pb.StreamRequest_builder{ExecCancel: &emptypb.Empty{}}.Build())
default:
return errors.New("send: unknown operation")
}
return nil
}
func (w *grpcWrapper) Recv() (*stream.Response, error) {
resp, err := w.sc.Recv()
if err != nil {
return nil, err
}
switch resp.WhichResponse() {
case pb.StreamResponse_ExecOutput_case:
return &stream.Response{Output: &stream.OutputResponse{
Index: int(resp.GetExecOutput().GetIndex()),
Fd: int(resp.GetExecOutput().GetFd()),
Content: resp.GetExecOutput().GetContent(),
}}, nil
case pb.StreamResponse_ExecResponse_case:
return &stream.Response{Response: &model.Response{
RequestID: resp.GetExecResponse().GetRequestID(),
Results: convertPBResult(resp.GetExecResponse().GetResults()),
ErrorMsg: resp.GetExecResponse().GetError(),
}}, nil
}
return nil, errors.New("recv: invalid response")
}
func convertPBResult(res []*pb.Response_Result) []model.Result {
var ret []model.Result
for _, r := range res {
ret = append(ret, model.Result{
Status: model.Status(r.GetStatus()),
ExitStatus: int(r.GetExitStatus()),
Error: r.GetError(),
Time: r.GetTime(),
RunTime: r.GetRunTime(),
Memory: r.GetMemory(),
Files: convertFiles(r.GetFiles()),
Buffs: r.GetFiles(),
FileIDs: r.GetFileIDs(),
FileError: convertPBFileError(r.GetFileError()),
})
}
return ret
}
func convertFiles(buf map[string][]byte) map[string]string {
ret := make(map[string]string, len(buf))
for k, v := range buf {
ret[k] = byteArrayToString(v)
}
return ret
}
func convertPBRequest(req *model.Request) *pb.StreamRequest {
ret := pb.StreamRequest_builder{
ExecRequest: pb.Request_builder{
RequestID: req.RequestID,
Cmd: convertPBCmd(req.Cmd),
PipeMapping: convertPBPipeMapping(req.PipeMapping),
}.Build(),
}.Build()
return ret
}
func convertPBFileError(fe []*pb.Response_FileError) []model.FileError {
var ret []model.FileError
for _, v := range fe {
ret = append(ret, model.FileError{
Name: v.GetName(),
Type: model.FileErrorType(v.GetType()),
Message: v.GetMessage(),
})
}
return ret
}
func convertPBCmd(cmd []model.Cmd) []*pb.Request_CmdType {
var ret []*pb.Request_CmdType
for _, c := range cmd {
ret = append(ret, pb.Request_CmdType_builder{
Args: c.Args,
Env: c.Env,
Tty: c.TTY,
Files: convertPBFiles(c.Files),
CpuTimeLimit: c.CPULimit,
ClockTimeLimit: c.ClockLimit,
MemoryLimit: c.MemoryLimit,
StackLimit: c.StackLimit,
ProcLimit: c.ProcLimit,
CpuRateLimit: c.CPURateLimit,
CpuSetLimit: c.CPUSetLimit,
DataSegmentLimit: c.DataSegmentLimit,
AddressSpaceLimit: c.AddressSpaceLimit,
CopyIn: convertPBCopyIn(c.CopyIn),
CopyOut: convertPBCopyOut(c.CopyOut),
CopyOutCached: convertPBCopyOut(c.CopyOutCached),
CopyOutMax: c.CopyOutMax,
CopyOutDir: c.CopyOutDir,
Symlinks: convertSymlink(c.CopyIn),
}.Build())
}
return ret
}
func convertPBCopyIn(copyIn map[string]model.CmdFile) map[string]*pb.Request_File {
rt := make(map[string]*pb.Request_File, len(copyIn))
for k, i := range copyIn {
if i.Symlink != nil {
continue
}
rt[k] = convertPBFile(i)
}
return rt
}
func convertPBCopyOut(copyOut []string) []*pb.Request_CmdCopyOutFile {
rt := make([]*pb.Request_CmdCopyOutFile, 0, len(copyOut))
for _, n := range copyOut {
optional := false
if strings.HasSuffix(n, "?") {
optional = true
n = strings.TrimSuffix(n, "?")
}
rt = append(rt, pb.Request_CmdCopyOutFile_builder{
Name: n,
Optional: optional,
}.Build())
}
return rt
}
func convertSymlink(copyIn map[string]model.CmdFile) map[string]string {
ret := make(map[string]string)
for k, v := range copyIn {
if v.Symlink == nil {
continue
}
ret[k] = *v.Symlink
}
return ret
}
func convertPBFiles(files []*model.CmdFile) []*pb.Request_File {
var ret []*pb.Request_File
for _, f := range files {
if f == nil {
ret = append(ret, nil)
} else {
ret = append(ret, convertPBFile(*f))
}
}
return ret
}
func convertPBFile(i model.CmdFile) *pb.Request_File {
switch {
case i.Src != nil:
return pb.Request_File_builder{Local: pb.Request_LocalFile_builder{Src: *i.Src}.Build()}.Build()
case i.Content != nil:
s := strToBytes(*i.Content)
return pb.Request_File_builder{Memory: pb.Request_MemoryFile_builder{Content: s}.Build()}.Build()
case i.FileID != nil:
return pb.Request_File_builder{Cached: pb.Request_CachedFile_builder{FileID: *i.FileID}.Build()}.Build()
case i.Name != nil && i.Max != nil:
return pb.Request_File_builder{Pipe: pb.Request_PipeCollector_builder{Name: *i.Name, Max: *i.Max, Pipe: i.Pipe}.Build()}.Build()
case i.StreamIn:
return pb.Request_File_builder{StreamIn: &emptypb.Empty{}}.Build()
case i.StreamOut:
return pb.Request_File_builder{StreamOut: &emptypb.Empty{}}.Build()
}
return nil
}
func convertPBPipeMapping(pm []model.PipeMap) []*pb.Request_PipeMap {
var ret []*pb.Request_PipeMap
for _, p := range pm {
ret = append(ret, pb.Request_PipeMap_builder{
In: convertPBPipeIndex(p.In),
Out: convertPBPipeIndex(p.Out),
Name: p.Name,
Proxy: p.Proxy,
Max: uint64(p.Max),
}.Build())
}
return ret
}
func convertPBPipeIndex(pi model.PipeIndex) *pb.Request_PipeMap_PipeIndex {
return pb.Request_PipeMap_PipeIndex_builder{Index: int32(pi.Index), Fd: int32(pi.Fd)}.Build()
}
type tokenAuth struct {
token string
}
func newTokenAuth(token string) credentials.PerRPCCredentials {
return &tokenAuth{token: token}
}
// Return value is mapped to request headers.
func (t *tokenAuth) GetRequestMetadata(ctx context.Context, in ...string) (map[string]string, error) {
return map[string]string{
"authorization": "Bearer " + t.token,
}, nil
}
func (*tokenAuth) RequireTransportSecurity() bool {
return false
}

200
cmd/go-judge-shell/shell.go Normal file
View File

@ -0,0 +1,200 @@
package main
import (
"flag"
"fmt"
"io"
"log"
"os"
"os/signal"
"path/filepath"
"syscall"
"time"
"github.com/criyle/go-judge/cmd/go-judge/model"
"github.com/criyle/go-judge/cmd/go-judge/stream"
"golang.org/x/term"
)
var (
transport = flag.String("transport", "websocket", "defines transport layer (websocket / grpc)")
wsURL = flag.String("ws-url", "ws://localhost:5050/stream", "HTTP server url")
grpcAddr = flag.String("grpc-addr", "localhost:5051", "GRPC server addr")
copyInDir = flag.String("copy-in-dir", "", "directory to copy files from")
)
const (
cpuLimit = 20 * time.Second
sessionLimit = 30 * time.Minute
procLimit = 50
memoryLimit = 256 << 20 // 256m
pathEnv = "PATH=/usr/local/bin:/usr/bin:/bin"
)
var env = []string{
pathEnv,
"HOME=/tmp",
"TERM=" + os.Getenv("TERM"),
}
// Stream defines the transport layer for stream execution
type Stream interface {
Send(*stream.Request) error
Recv() (*stream.Response, error)
}
func main() {
flag.Parse()
args := flag.Args()
if len(args) == 0 {
args = []string{"/bin/bash"}
}
var s Stream
switch *transport {
case "websocket":
s = newWebsocket(args, *wsURL)
case "grpc":
s = newGrpc(args, *grpcAddr)
default:
log.Fatalln("invalid transport: ", *transport)
}
r, err := run(s, args)
log.Printf("finished: %+v %v", r, err)
}
func run(sc Stream, args []string) (*model.Response, error) {
copyIn := make(map[string]model.CmdFile, 0)
if *copyInDir != "" {
_ = filepath.Walk(*copyInDir,
func(path string, info os.FileInfo, err error) error {
if err != nil {
return nil
}
absPath, err := filepath.Abs(path)
if err != nil {
return nil
}
relPath, err := filepath.Rel(*copyInDir, path)
if err != nil {
return nil
}
if !info.IsDir() {
copyIn[relPath] = model.CmdFile{Src: &absPath}
}
return nil
})
}
req := model.Request{
Cmd: []model.Cmd{{
Args: args,
Env: env,
Files: []*model.CmdFile{
{StreamIn: true},
{StreamOut: true},
{StreamOut: true},
},
CopyIn: copyIn,
CPULimit: uint64(cpuLimit.Nanoseconds()),
ClockLimit: uint64(sessionLimit.Nanoseconds()),
MemoryLimit: memoryLimit,
ProcLimit: procLimit,
TTY: true,
}},
}
err := sc.Send(&stream.Request{Request: &req})
if err != nil {
return nil, fmt.Errorf("send request: %w", err)
}
// Set stdin in raw mode.
oldState, err := term.MakeRaw(int(os.Stdin.Fd()))
if err != nil {
panic(err)
}
defer func() { _ = term.Restore(int(os.Stdin.Fd()), oldState) }() // Best effort.
// pump msg
sendCh := make(chan *stream.Request, 64)
defer close(sendCh)
go func() {
for r := range sendCh {
err := sc.Send(r)
if err != nil {
log.Println("input", err)
return
}
}
}()
// pump stdin
forceQuit := false
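// A lone ^C (ETX, 0x03) is forwarded to the remote TTY as raw input; a second
// consecutive ^C escalates to a cancel request. Local stdin EOF is forwarded
// as ^D (EOT, 0x04) so the remote shell sees end-of-input.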
go func() {
buf := make([]byte, 4096)
for {
n, err := os.Stdin.Read(buf)
if err == io.EOF {
sendCh <- &stream.Request{
Input: &stream.InputRequest{
Content: []byte("\004"),
},
}
continue
}
if n == 1 && buf[0] == 3 {
if forceQuit {
sendCh <- &stream.Request{
Cancel: &struct{}{},
}
}
forceQuit = true
} else {
forceQuit = false
}
if err != nil {
log.Println("stdin", err)
return
}
sendCh <- &stream.Request{
Input: &stream.InputRequest{
Content: buf[:n],
},
}
}
}()
sigCh := make(chan os.Signal, 1)
signal.Notify(sigCh, syscall.SIGINT)
// pump ^C
go func() {
for range sigCh {
sendCh <- &stream.Request{
Input: &stream.InputRequest{
Content: []byte("\003"),
},
}
}
}()
// pump resize
handleSizeChange(sendCh)
// pump stdout
for {
sr, err := sc.Recv()
if err != nil {
return nil, fmt.Errorf("recv: %w", err)
}
switch {
case sr.Output != nil:
switch sr.Output.Fd {
case 1:
os.Stdout.Write(sr.Output.Content)
case 2:
os.Stderr.Write(sr.Output.Content)
}
case sr.Response != nil:
return sr.Response, nil
}
}
}

View File

@ -0,0 +1,35 @@
package main
import (
"log"
"os"
"os/signal"
"syscall"
"github.com/creack/pty"
"github.com/criyle/go-judge/cmd/go-judge/stream"
)
func handleSizeChange(sendCh chan *stream.Request) {
// pump resize
ch := make(chan os.Signal, 1)
signal.Notify(ch, syscall.SIGWINCH)
go func() {
for range ch {
winSize, err := pty.GetsizeFull(os.Stdin)
if err != nil {
log.Println("get win size", err)
return
}
sendCh <- &stream.Request{
Resize: &stream.ResizeRequest{
Rows: int(winSize.Rows),
Cols: int(winSize.Cols),
X: int(winSize.X),
Y: int(winSize.Y),
},
}
}
}()
ch <- syscall.SIGWINCH // Initial resize.
}

View File

@ -0,0 +1,8 @@
//go:build !linux
package main
import "github.com/criyle/go-judge/cmd/go-judge/stream"
func handleSizeChange(sendCh chan *stream.Request) {
}

View File

@ -0,0 +1,11 @@
package main
import "unsafe"
func strToBytes(s string) []byte {
return unsafe.Slice(unsafe.StringData(s), len(s))
}
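// byteArrayToString converts a byte slice to a string without copying; the caller must not modify buf afterwards.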
func byteArrayToString(buf []byte) string {
return *(*string)(unsafe.Pointer(&buf))
}

View File

@ -0,0 +1,104 @@
package main
import (
"encoding/json"
"fmt"
"io"
"log"
"net/http"
"os"
"github.com/criyle/go-judge/cmd/go-judge/model"
"github.com/criyle/go-judge/cmd/go-judge/stream"
"github.com/gorilla/websocket"
)
var _ Stream = &websocketStream{}
type websocketStream struct {
conn *websocket.Conn
}
func newWebsocket(args []string, wsURL string) Stream {
header := make(http.Header)
token := os.Getenv("TOKEN")
if token != "" {
header.Add("Authorization", "Bearer "+token)
}
conn, _, err := websocket.DefaultDialer.Dial(wsURL, header)
if err != nil {
log.Fatalln("ws connect: ", err)
}
log.Println("start", args)
return &websocketStream{conn: conn}
}
// Recv implements Stream.
func (s *websocketStream) Recv() (*stream.Response, error) {
_, r, err := s.conn.ReadMessage()
if err != nil {
return nil, err
}
if len(r) == 0 {
return nil, io.ErrUnexpectedEOF
}
resp := new(stream.Response)
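// The first byte selects the frame type: 1 = final execution response (JSON
// body follows), 2 = command output (the second byte packs the command index
// in its high nibble and the fd in its low nibble; the rest is raw content).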
switch r[0] {
case 1:
resp.Response = new(model.Response)
if err := json.Unmarshal(r[1:], resp.Response); err != nil {
return nil, err
}
case 2:
if len(r) < 2 {
return nil, io.ErrUnexpectedEOF
}
resp.Output = new(stream.OutputResponse)
resp.Output.Index = int(r[1]>>4) & 0xf
resp.Output.Fd = int(r[1]) & 0xf
resp.Output.Content = r[2:]
default:
return nil, fmt.Errorf("invalid type code: %d", r[0])
}
return resp, nil
}
// Send implements Stream.
func (s *websocketStream) Send(req *stream.Request) error {
w, err := s.conn.NextWriter(websocket.BinaryMessage)
if err != nil {
return err
}
defer w.Close()
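// Outgoing frames mirror the stream protocol: 1 = exec request (JSON), 2 =
// resize (JSON), 3 = input (one header byte packing index/fd, then raw
// content), 4 = cancel (no body).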
switch {
case req.Request != nil:
if _, err := w.Write([]byte{1}); err != nil {
return err
}
if err := json.NewEncoder(w).Encode(req.Request); err != nil {
return err
}
case req.Resize != nil:
if _, err := w.Write([]byte{2}); err != nil {
return err
}
if err := json.NewEncoder(w).Encode(req.Resize); err != nil {
return err
}
case req.Input != nil:
if _, err := w.Write([]byte{3, byte(req.Input.Index<<4 | req.Input.Fd)}); err != nil {
return err
}
if _, err := w.Write(req.Input.Content); err != nil {
return err
}
case req.Cancel != nil:
if _, err := w.Write([]byte{4}); err != nil {
return err
}
default:
return fmt.Errorf("invalid request")
}
return nil
}

View File

@ -0,0 +1,90 @@
package config
import (
"os"
"runtime"
"time"
"github.com/criyle/go-judge/envexec"
"github.com/koding/multiconfig"
)
// Config defines go judge server configuration
type Config struct {
// container
ContainerInitPath string `flagUsage:"container init path"`
PreFork int `flagUsage:"control # of the prefork workers" default:"1"`
TmpFsParam string `flagUsage:"tmpfs mount data (only for default mount with no mount.yaml)" default:"size=128m,nr_inodes=4k"`
NetShare bool `flagUsage:"share net namespace with host"`
MountConf string `flagUsage:"specifies mount configuration file" default:"mount.yaml"`
SeccompConf string `flagUsage:"specifies seccomp filter" default:"seccomp.yaml"`
Parallelism int `flagUsage:"control the # of concurrent executions (defaults to the number of CPUs)"`
CgroupPrefix string `flagUsage:"control cgroup prefix" default:"gojudge"`
ContainerCredStart int `flagUsage:"control the start uid&gid for container (0 uses unprivileged root)" default:"0"`
NoFallback bool `flagUsage:"exit if fallback to rlimit / rusage mode"`
// file store
SrcPrefix []string `flagUsage:"specifies directory prefix for source type copyin (example: -src-prefix=/home,/usr)"`
Dir string `flagUsage:"specifies directory to store file upload / download (in memory by default)"`
// runner limit
TimeLimitCheckerInterval time.Duration `flagUsage:"specifies time limit checker interval" default:"100ms"`
ExtraMemoryLimit *envexec.Size `flagUsage:"specifies extra memory buffer for check memory limit" default:"16k"`
OutputLimit *envexec.Size `flagUsage:"specifies POSIX rlimit for output for each command" default:"256m"`
CopyOutLimit *envexec.Size `flagUsage:"specifies default file copy out max" default:"256m"`
OpenFileLimit int `flagUsage:"specifies max open file count" default:"256"`
Cpuset string `flagUsage:"control the usage of cpuset for all container process"`
EnableCPURate bool `flagUsage:"enable cpu cgroup rate control"`
CPUCfsPeriod time.Duration `flagUsage:"set cpu.cfs_period" default:"100ms"`
FileTimeout time.Duration `flagUsage:"specified timeout for filestore files"`
// server config
HTTPAddr string `flagUsage:"specifies the http binding address"`
EnableGRPC bool `flagUsage:"enable gRPC endpoint"`
GRPCAddr string `flagUsage:"specifies the grpc binding address"`
MonitorAddr string `flagUsage:"specifies the metrics binding address"`
AuthToken string `flagUsage:"bearer token auth for REST / gRPC"`
GRPCMsgSize *envexec.Size `flagUsage:"message size limit for gRPC message" default:"64m"`
EnableDebug bool `flagUsage:"enable debug endpoint"`
EnableMetrics bool `flagUsage:"enable prometheus metrics endpoint"`
// logger config
Release bool `flagUsage:"release level of logs"`
Silent bool `flagUsage:"do not print logs"`
// fix for high memory usage
ForceGCTarget *envexec.Size `flagUsage:"specifies force GC trigger heap size" default:"20m"`
ForceGCInterval time.Duration `flagUsage:"specifies force GC trigger interval" default:"5s"`
// show version and exit
Version bool `flagUsage:"show version and exit"`
}
// Load loads config from flag & environment variables
func (c *Config) Load() error {
cl := multiconfig.MultiLoader(
&multiconfig.TagLoader{},
&multiconfig.EnvironmentLoader{
Prefix: "ES",
CamelCase: true,
},
&multiconfig.FlagLoader{
CamelCase: true,
EnvPrefix: "ES",
},
)
if os.Getpid() == 1 {
c.Release = true
c.HTTPAddr = ":5050"
c.GRPCAddr = ":5051"
c.MonitorAddr = ":5052"
} else {
c.HTTPAddr = "localhost:5050"
c.GRPCAddr = "localhost:5051"
c.MonitorAddr = "localhost:5052"
}
if c.Parallelism <= 0 {
c.Parallelism = runtime.NumCPU()
}
return cl.Load(c)
}

View File

@ -0,0 +1,271 @@
package grpcexecutor
import (
"context"
"fmt"
"io"
"time"
"github.com/criyle/go-judge/cmd/go-judge/model"
"github.com/criyle/go-judge/envexec"
"github.com/criyle/go-judge/filestore"
"github.com/criyle/go-judge/pb"
"github.com/criyle/go-judge/worker"
"go.uber.org/zap"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/emptypb"
)
// New creates a gRPC executor server
func New(worker worker.Worker, fs filestore.FileStore, srcPrefix []string, logger *zap.Logger) pb.ExecutorServer {
return &execServer{
worker: worker,
fs: fs,
srcPrefix: srcPrefix,
logger: logger,
}
}
type execServer struct {
pb.UnimplementedExecutorServer
worker worker.Worker
fs filestore.FileStore
srcPrefix []string
logger *zap.Logger
}
func (e *execServer) Exec(ctx context.Context, req *pb.Request) (*pb.Response, error) {
r, err := convertPBRequest(req, e.srcPrefix)
if err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
if ce := e.logger.Check(zap.DebugLevel, "request"); ce != nil {
ce.Write(zap.String("body", fmt.Sprintf("%+v", r)))
}
rtCh, _ := e.worker.Submit(ctx, r)
rt := <-rtCh
if ce := e.logger.Check(zap.DebugLevel, "response"); ce != nil {
ce.Write(zap.String("body", fmt.Sprintf("%+v", rt)))
}
if rt.Error != nil {
return nil, status.Error(codes.Internal, rt.Error.Error())
}
ret, err := model.ConvertResponse(rt, false)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
resp, err := convertPBResponse(ret)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
return resp, nil
}
func (e *execServer) FileList(c context.Context, n *emptypb.Empty) (*pb.FileListType, error) {
return pb.FileListType_builder{
FileIDs: e.fs.List(),
}.Build(), nil
}
func (e *execServer) FileGet(c context.Context, f *pb.FileID) (*pb.FileContent, error) {
name, file := e.fs.Get(f.GetFileID())
if file == nil {
return nil, status.Errorf(codes.NotFound, "file not found: %q", f.GetFileID())
}
r, err := envexec.FileToReader(file)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
defer r.Close()
content, err := io.ReadAll(r)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
return pb.FileContent_builder{
Name: name,
Content: content,
}.Build(), nil
}
func (e *execServer) FileAdd(c context.Context, fc *pb.FileContent) (*pb.FileID, error) {
f, err := e.fs.New()
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
defer f.Close()
if _, err := f.Write(fc.GetContent()); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
fid, err := e.fs.Add(fc.GetName(), f.Name())
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
return pb.FileID_builder{
FileID: fid,
}.Build(), nil
}
func (e *execServer) FileDelete(c context.Context, f *pb.FileID) (*emptypb.Empty, error) {
ok := e.fs.Remove(f.GetFileID())
if !ok {
return nil, status.Errorf(codes.NotFound, "file id does not exists: %q", f.GetFileID())
}
return &emptypb.Empty{}, nil
}
func convertPBResponse(r model.Response) (*pb.Response, error) {
res := pb.Response_builder{
RequestID: r.RequestID,
Results: make([]*pb.Response_Result, 0, len(r.Results)),
Error: r.ErrorMsg,
}.Build()
for _, c := range r.Results {
rt, err := convertPBResult(c)
if err != nil {
return nil, err
}
res.SetResults(append(res.GetResults(), rt))
}
return res, nil
}
func convertPBResult(r model.Result) (*pb.Response_Result, error) {
return pb.Response_Result_builder{
Status: pb.Response_Result_StatusType(r.Status),
ExitStatus: int32(r.ExitStatus),
Error: r.Error,
Time: r.Time,
RunTime: r.RunTime,
Memory: r.Memory,
ProcPeak: r.ProcPeak,
Files: r.Buffs,
FileIDs: r.FileIDs,
FileError: convertPBFileError(r.FileError),
}.Build(), nil
}
func convertPBFileError(fe []envexec.FileError) []*pb.Response_FileError {
rt := make([]*pb.Response_FileError, 0, len(fe))
for _, e := range fe {
rt = append(rt, pb.Response_FileError_builder{
Name: e.Name,
Type: pb.Response_FileError_ErrorType(e.Type),
Message: e.Message,
}.Build())
}
return rt
}
func convertPBRequest(r *pb.Request, srcPrefix []string) (req *worker.Request, err error) {
req = &worker.Request{
RequestID: r.GetRequestID(),
Cmd: make([]worker.Cmd, 0, len(r.GetCmd())),
PipeMapping: make([]worker.PipeMap, 0, len(r.GetPipeMapping())),
}
for _, c := range r.GetCmd() {
cm, err := convertPBCmd(c, srcPrefix)
if err != nil {
return nil, err
}
req.Cmd = append(req.Cmd, cm)
}
for _, p := range r.GetPipeMapping() {
pm := convertPBPipeMap(p)
req.PipeMapping = append(req.PipeMapping, pm)
}
return req, nil
}
func convertPBPipeMap(p *pb.Request_PipeMap) worker.PipeMap {
return worker.PipeMap{
In: convertPBPipeIndex(p.GetIn()),
Out: convertPBPipeIndex(p.GetOut()),
Proxy: p.GetProxy(),
Name: p.GetName(),
Limit: worker.Size(p.GetMax()),
}
}
func convertPBPipeIndex(p *pb.Request_PipeMap_PipeIndex) worker.PipeIndex {
return worker.PipeIndex{Index: int(p.GetIndex()), Fd: int(p.GetFd())}
}
func convertPBCmd(c *pb.Request_CmdType, srcPrefix []string) (cm worker.Cmd, err error) {
cm = worker.Cmd{
Args: c.GetArgs(),
Env: c.GetEnv(),
TTY: c.GetTty(),
CPULimit: time.Duration(c.GetCpuTimeLimit()),
ClockLimit: time.Duration(c.GetClockTimeLimit()),
MemoryLimit: envexec.Size(c.GetMemoryLimit()),
StackLimit: envexec.Size(c.GetStackLimit()),
ProcLimit: c.GetProcLimit(),
CPURateLimit: c.GetCpuRateLimit(),
CPUSetLimit: c.GetCpuSetLimit(),
DataSegmentLimit: c.GetDataSegmentLimit(),
AddressSpaceLimit: c.GetAddressSpaceLimit(),
CopyOut: convertCopyOut(c.GetCopyOut()),
CopyOutCached: convertCopyOut(c.GetCopyOutCached()),
CopyOutMax: c.GetCopyOutMax(),
CopyOutDir: c.GetCopyOutDir(),
CopyOutTruncate: c.GetCopyOutTruncate(),
Symlinks: c.GetSymlinks(),
}
for _, f := range c.GetFiles() {
cf, err := convertPBFile(f, srcPrefix)
if err != nil {
return cm, err
}
cm.Files = append(cm.Files, cf)
}
if copyIn := c.GetCopyIn(); copyIn != nil {
cm.CopyIn = make(map[string]worker.CmdFile)
for k, f := range copyIn {
cf, err := convertPBFile(f, srcPrefix)
if err != nil {
return cm, err
}
cm.CopyIn[k] = cf
}
}
return cm, nil
}
func convertPBFile(c *pb.Request_File, srcPrefix []string) (worker.CmdFile, error) {
switch c.WhichFile() {
case 0:
return nil, nil
case pb.Request_File_Local_case:
if len(srcPrefix) > 0 {
ok, err := model.CheckPathPrefixes(c.GetLocal().GetSrc(), srcPrefix)
if err != nil {
return nil, fmt.Errorf("check path prefixes: %w", err)
}
if !ok {
return nil, fmt.Errorf("file outside of prefix: %q, %q", c.GetLocal().GetSrc(), srcPrefix)
}
}
return &worker.LocalFile{Src: c.GetLocal().GetSrc()}, nil
case pb.Request_File_Memory_case:
return &worker.MemoryFile{Content: c.GetMemory().GetContent()}, nil
case pb.Request_File_Cached_case:
return &worker.CachedFile{FileID: c.GetCached().GetFileID()}, nil
case pb.Request_File_Pipe_case:
return &worker.Collector{Name: c.GetPipe().GetName(), Max: envexec.Size(c.GetPipe().GetMax()), Pipe: c.GetPipe().GetPipe()}, nil
}
return nil, fmt.Errorf("request file type not supported: %T", c)
}
func convertCopyOut(copyOut []*pb.Request_CmdCopyOutFile) []worker.CmdCopyOutFile {
rt := make([]worker.CmdCopyOutFile, 0, len(copyOut))
for _, n := range copyOut {
rt = append(rt, worker.CmdCopyOutFile{
Name: n.GetName(),
Optional: n.GetOptional(),
})
}
return rt
}


@ -0,0 +1,176 @@
package grpcexecutor
import (
"errors"
"github.com/criyle/go-judge/cmd/go-judge/model"
"github.com/criyle/go-judge/cmd/go-judge/stream"
"github.com/criyle/go-judge/pb"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
)
var _ stream.Stream = &streamWrapper{}
type streamWrapper struct {
es pb.Executor_ExecStreamServer
}
func (sw *streamWrapper) Send(r stream.Response) error {
res := &pb.StreamResponse{}
switch {
case r.Response != nil:
resp, err := convertPBResponse(*r.Response)
if err != nil {
return status.Errorf(codes.Aborted, "response: %v", err)
}
res.SetExecResponse(proto.ValueOrDefault(resp))
case r.Output != nil:
res.SetExecOutput(pb.StreamResponse_Output_builder{
Index: uint32(r.Output.Index),
Fd: uint32(r.Output.Fd),
Content: r.Output.Content,
}.Build())
}
return sw.es.Send(res)
}
func (sw *streamWrapper) Recv() (*stream.Request, error) {
req, err := sw.es.Recv()
if err != nil {
return nil, err
}
switch req.WhichRequest() {
case pb.StreamRequest_ExecRequest_case:
return &stream.Request{Request: convertPBStreamRequest(req.GetExecRequest())}, nil
case pb.StreamRequest_ExecInput_case:
return &stream.Request{Input: &stream.InputRequest{
Index: int(req.GetExecInput().GetIndex()),
Fd: int(req.GetExecInput().GetFd()),
Content: req.GetExecInput().GetContent(),
}}, nil
case pb.StreamRequest_ExecResize_case:
return &stream.Request{Resize: &stream.ResizeRequest{
Index: int(req.GetExecResize().GetIndex()),
Fd: int(req.GetExecResize().GetFd()),
Rows: int(req.GetExecResize().GetRows()),
Cols: int(req.GetExecResize().GetCols()),
X: int(req.GetExecResize().GetX()),
Y: int(req.GetExecResize().GetY()),
}}, nil
case pb.StreamRequest_ExecCancel_case:
return &stream.Request{Cancel: &struct{}{}}, nil
}
return nil, errors.ErrUnsupported
}
func convertPBStreamRequest(req *pb.Request) *model.Request {
ret := &model.Request{
RequestID: req.GetRequestID(),
}
for _, cmd := range req.GetCmd() {
ret.Cmd = append(ret.Cmd, model.Cmd{
Args: cmd.GetArgs(),
Env: cmd.GetEnv(),
TTY: cmd.GetTty(),
Files: convertPBStreamFiles(cmd.GetFiles()),
CPULimit: cmd.GetCpuTimeLimit(),
ClockLimit: cmd.GetClockTimeLimit(),
MemoryLimit: cmd.GetMemoryLimit(),
StackLimit: cmd.GetStackLimit(),
ProcLimit: cmd.GetProcLimit(),
CPURateLimit: cmd.GetCpuRateLimit(),
CPUSetLimit: cmd.GetCpuSetLimit(),
DataSegmentLimit: cmd.GetDataSegmentLimit(),
AddressSpaceLimit: cmd.GetAddressSpaceLimit(),
CopyIn: convertPBStreamCopyIn(cmd),
CopyOut: convertStreamCopyOut(cmd.GetCopyOut()),
CopyOutCached: convertStreamCopyOut(cmd.GetCopyOutCached()),
CopyOutMax: cmd.GetCopyOutMax(),
CopyOutDir: cmd.GetCopyOutDir(),
})
}
for _, p := range req.GetPipeMapping() {
ret.PipeMapping = append(ret.PipeMapping, model.PipeMap{
In: convertPBStreamPipeIndex(p.GetIn()),
Out: convertPBStreamPipeIndex(p.GetOut()),
Max: int64(p.GetMax()),
Name: p.GetName(),
Proxy: p.GetProxy(),
})
}
return ret
}
func convertPBStreamPipeIndex(pi *pb.Request_PipeMap_PipeIndex) model.PipeIndex {
return model.PipeIndex{Index: int(pi.GetIndex()), Fd: int(pi.GetFd())}
}
func convertPBStreamFiles(files []*pb.Request_File) []*model.CmdFile {
var rt []*model.CmdFile
for _, f := range files {
if f == nil {
rt = append(rt, nil)
} else {
m := convertPBStreamFile(f)
rt = append(rt, &m)
}
}
return rt
}
func convertPBStreamCopyIn(cmd *pb.Request_CmdType) map[string]model.CmdFile {
rt := make(map[string]model.CmdFile, len(cmd.GetCopyIn())+len(cmd.GetSymlinks()))
for k, i := range cmd.GetCopyIn() {
if !i.HasFile() {
continue
}
rt[k] = convertPBStreamFile(i)
}
for k, v := range cmd.GetSymlinks() {
rt[k] = model.CmdFile{Symlink: &v}
}
return rt
}
func convertPBStreamFile(i *pb.Request_File) model.CmdFile {
switch i.WhichFile() {
case pb.Request_File_Local_case:
return model.CmdFile{Src: proto.String(i.GetLocal().GetSrc())}
case pb.Request_File_Memory_case:
s := byteArrayToString(i.GetMemory().GetContent())
return model.CmdFile{Content: &s}
case pb.Request_File_Cached_case:
return model.CmdFile{FileID: proto.String(i.GetCached().GetFileID())}
case pb.Request_File_Pipe_case:
return model.CmdFile{Name: proto.String(i.GetPipe().GetName()), Max: proto.Int64(i.GetPipe().GetMax()), Pipe: i.GetPipe().GetPipe()}
case pb.Request_File_StreamIn_case:
return model.CmdFile{StreamIn: true}
case pb.Request_File_StreamOut_case:
return model.CmdFile{StreamOut: true}
}
return model.CmdFile{}
}
func convertStreamCopyOut(copyOut []*pb.Request_CmdCopyOutFile) []string {
rt := make([]string, 0, len(copyOut))
for _, n := range copyOut {
name := n.GetName()
if n.GetOptional() {
name += "?"
}
rt = append(rt, name)
}
return rt
}
func (e *execServer) ExecStream(es pb.Executor_ExecStreamServer) error {
w := &streamWrapper{
es: es,
}
if err := stream.Start(es.Context(), w, e.worker, e.srcPrefix, e.logger); err != nil {
return status.Error(codes.Internal, err.Error())
}
return nil
}


@ -0,0 +1,7 @@
package grpcexecutor
import "unsafe"
func byteArrayToString(buf []byte) string {
return *(*string)(unsafe.Pointer(&buf))
}

142
cmd/go-judge/listener.go Normal file

@ -0,0 +1,142 @@
package main
import (
"context"
"net"
"strings"
"syscall"
)
type multiListener struct {
listeners []*net.TCPListener
connChan chan acceptResult
ctx context.Context
cancel context.CancelFunc
}
type acceptResult struct {
conn net.Conn
err error
}
func newListener(addr string) (net.Listener, error) {
host, port, err := net.SplitHostPort(addr)
if err != nil {
return nil, err
}
iPort, err := net.LookupPort("tcp", port)
if err != nil {
return nil, err
}
var ips []net.IP
switch host {
case "":
return net.Listen("tcp", addr)
case "localhost":
ips, err = getLocalhostIP()
if err != nil {
return nil, err
}
default:
ips, err = net.LookupIP(host)
if err != nil {
return nil, err
}
}
if len(ips) == 0 {
return net.Listen("tcp", addr)
} else if len(ips) == 1 {
return net.ListenTCP("tcp", &net.TCPAddr{IP: ips[0], Port: iPort})
}
return newMultiListener(ips, iPort)
}
func getLocalhostIP() ([]net.IP, error) {
addrs, err := net.InterfaceAddrs()
if err != nil {
return nil, err
}
rt := make([]net.IP, 0, 2)
for _, addr := range addrs {
if ip, ok := addr.(*net.IPNet); ok && ip.IP.IsLoopback() {
rt = append(rt, ip.IP)
}
}
return rt, nil
}
func newMultiListener(ips []net.IP, port int) (lis net.Listener, err error) {
listeners := make([]*net.TCPListener, 0, len(ips))
defer func() {
if err != nil {
for _, l := range listeners {
l.Close()
}
}
}()
for _, ip := range ips {
l, err := net.ListenTCP("tcp", &net.TCPAddr{IP: ip, Port: port})
if err != nil {
return nil, err
}
listeners = append(listeners, l)
}
ctx, cancel := context.WithCancel(context.Background())
rt := &multiListener{
listeners: listeners,
connChan: make(chan acceptResult),
ctx: ctx,
cancel: cancel,
}
for _, l := range listeners {
l := l
go func() {
for {
conn, err := l.AcceptTCP()
select {
case rt.connChan <- acceptResult{conn: conn, err: err}:
case <-ctx.Done():
return
}
}
}()
}
return rt, nil
}
func (ml *multiListener) Accept() (net.Conn, error) {
select {
case ar := <-ml.connChan:
return ar.conn, ar.err
case <-ml.ctx.Done():
return nil, syscall.EINVAL
}
}
func (ml *multiListener) Close() error {
ml.cancel()
for _, l := range ml.listeners {
l.Close()
}
return nil
}
func (ml *multiListener) Addr() net.Addr {
return ml.listeners[0].Addr()
}
func printListener(lis net.Listener) string {
switch l := lis.(type) {
case *multiListener:
addrs := make([]string, 0, len(l.listeners))
for _, l := range l.listeners {
addrs = append(addrs, l.Addr().String())
}
return strings.Join(addrs, ",")
default:
return lis.Addr().String()
}
}
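// Usage sketch (address and port are illustrative, not taken from the config):
// when the host resolves to several IPs, as "localhost" usually does, the
// returned multiListener accepts on every resolved address at once.
//
//	lis, err := newListener("localhost:5050")
//	if err != nil {
//		log.Fatalln("listen:", err)
//	}
//	defer lis.Close()
//	log.Println("listening on", printListener(lis)) // e.g. 127.0.0.1:5050,[::1]:5050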

621
cmd/go-judge/main.go Normal file

@ -0,0 +1,621 @@
// Command go-judge starts an HTTP server that receives commands to run
// programs inside a sandbox.
package main
import (
"context"
"errors"
"flag"
"fmt"
"log"
"net/http"
"net/http/pprof"
"os"
"os/signal"
"path/filepath"
"runtime"
"runtime/debug"
"strings"
"syscall"
"time"
"github.com/criyle/go-judge/cmd/go-judge/config"
grpcexecutor "github.com/criyle/go-judge/cmd/go-judge/grpc_executor"
restexecutor "github.com/criyle/go-judge/cmd/go-judge/rest_executor"
"github.com/criyle/go-judge/cmd/go-judge/version"
wsexecutor "github.com/criyle/go-judge/cmd/go-judge/ws_executor"
"github.com/criyle/go-judge/env"
"github.com/criyle/go-judge/env/pool"
"github.com/criyle/go-judge/envexec"
"github.com/criyle/go-judge/filestore"
"github.com/criyle/go-judge/pb"
"github.com/criyle/go-judge/worker"
ginzap "github.com/gin-contrib/zap"
"github.com/gin-gonic/gin"
grpc_prometheus "github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus"
grpc_auth "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/auth"
grpc_logging "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/logging"
grpc_recovery "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/recovery"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
ginprometheus "github.com/zsais/go-gin-prometheus"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"go.uber.org/zap/zapgrpc"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/status"
)
var logger *zap.Logger
func main() {
conf := loadConf()
if conf.Version {
fmt.Println(version.Version)
return
}
initLogger(conf)
defer logger.Sync()
if ce := logger.Check(zap.InfoLevel, "Config loaded"); ce != nil {
ce.Write(zap.String("config", fmt.Sprintf("%+v", conf)))
}
warnIfNotLinux()
// Init environment pool
fs, fsCleanUp := newFileStore(conf)
b, builderParam := newEnvBuilder(conf)
envPool := newEnvPool(b, conf.EnableMetrics)
prefork(envPool, conf.PreFork)
work := newWorker(conf, envPool, fs)
work.Start()
logger.Info("Worker stated ",
zap.Int("parallelism", conf.Parallelism),
zap.String("dir", conf.Dir),
zap.Duration("timeLimitCheckInterval", conf.TimeLimitCheckerInterval))
initCgroupMetrics(conf, builderParam)
servers := []initFunc{
cleanUpWorker(work),
cleanUpFs(fsCleanUp),
initHTTPServer(conf, work, fs, builderParam),
initMonitorHTTPServer(conf),
initGRPCServer(conf, work, fs),
}
// Gracefully shutdown, with signal / HTTP server / gRPC server / Monitor HTTP server
sig := make(chan os.Signal, 1+len(servers))
// worker and fs clean up func
stops := []stopFunc{}
for _, s := range servers {
start, stop := s()
if start != nil {
go func() {
start()
sig <- os.Interrupt
}()
}
if stop != nil {
stops = append(stops, stop)
}
}
// background force GC worker
newForceGCWorker(conf)
// Graceful shutdown...
signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM)
loop:
for s := range sig {
switch s {
case syscall.SIGINT:
break loop
case syscall.SIGTERM:
if isManagedByPM2() {
logger.Info("running with PM2, received SIGTERM (from systemd), ignoring")
} else {
break loop
}
}
}
signal.Reset(syscall.SIGINT, syscall.SIGTERM)
logger.Info("Shutting Down...")
ctx, cancel := context.WithTimeout(context.TODO(), time.Second*3)
defer cancel()
var eg errgroup.Group
for _, s := range stops {
s := s
eg.Go(func() error {
return s(ctx)
})
}
go func() {
logger.Info("Shutdown Finished", zap.Error(eg.Wait()))
cancel()
}()
<-ctx.Done()
}
func warnIfNotLinux() {
if runtime.GOOS != "linux" {
logger.Warn("Platform is not primarily supported", zap.String("GOOS", runtime.GOOS))
logger.Warn("Please notice that the primary supporting platform is Linux")
logger.Warn("Windows and macOS(darwin) support are only recommended in development environment")
}
}
func loadConf() *config.Config {
var conf config.Config
if err := conf.Load(); err != nil {
if err == flag.ErrHelp {
os.Exit(0)
}
log.Fatalln("load config failed ", err)
}
return &conf
}
type (
stopFunc func(ctx context.Context) error
initFunc func() (start func(), cleanUp stopFunc)
)
func cleanUpWorker(work worker.Worker) initFunc {
return func() (start func(), cleanUp stopFunc) {
return nil, func(ctx context.Context) error {
work.Shutdown()
logger.Info("Worker shutdown")
return nil
}
}
}
func cleanUpFs(fsCleanUp func() error) initFunc {
return func() (start func(), cleanUp stopFunc) {
if fsCleanUp == nil {
return nil, nil
}
return nil, func(ctx context.Context) error {
err := fsCleanUp()
logger.Info("FileStore cleaned up")
return err
}
}
}
func initHTTPServer(conf *config.Config, work worker.Worker, fs filestore.FileStore, builderParam map[string]any) initFunc {
return func() (start func(), cleanUp stopFunc) {
// Init http handle
r := initHTTPMux(conf, work, fs, builderParam)
srv := http.Server{
Addr: conf.HTTPAddr,
Handler: r,
}
return func() {
lis, err := newListener(conf.HTTPAddr)
if err != nil {
logger.Error("Http server listen failed", zap.Error(err))
return
}
logger.Info("Starting http server", zap.String("addr", conf.HTTPAddr), zap.String("listener", printListener(lis)))
if err := srv.Serve(lis); errors.Is(err, http.ErrServerClosed) {
logger.Info("Http server stopped", zap.Error(err))
} else {
logger.Error("Http server stopped", zap.Error(err))
}
}, func(ctx context.Context) error {
logger.Info("Http server shutting down")
return srv.Shutdown(ctx)
}
}
}
func initMonitorHTTPServer(conf *config.Config) initFunc {
return func() (start func(), cleanUp stopFunc) {
// Init monitor HTTP server
mr := initMonitorHTTPMux(conf)
if mr == nil {
return nil, nil
}
msrv := http.Server{
Addr: conf.MonitorAddr,
Handler: mr,
}
return func() {
lis, err := newListener(conf.MonitorAddr)
if err != nil {
logger.Error("Monitoring http listen failed", zap.Error(err))
return
}
logger.Info("Starting monitoring http server", zap.String("addr", conf.MonitorAddr), zap.String("listener", printListener(lis)))
logger.Info("Monitoring http server stopped", zap.Error(msrv.Serve(lis)))
}, func(ctx context.Context) error {
logger.Info("Monitoring http server shutdown")
return msrv.Shutdown(ctx)
}
}
}
func initGRPCServer(conf *config.Config, work worker.Worker, fs filestore.FileStore) initFunc {
return func() (start func(), cleanUp stopFunc) {
if !conf.EnableGRPC {
return nil, nil
}
// Init gRPC server
esServer := grpcexecutor.New(work, fs, conf.SrcPrefix, logger)
grpcServer := newGRPCServer(conf, esServer)
return func() {
lis, err := newListener(conf.GRPCAddr)
if err != nil {
logger.Error("gRPC listen failed: ", zap.Error(err))
return
}
logger.Info("Starting gRPC server", zap.String("addr", conf.GRPCAddr), zap.String("listener", printListener(lis)))
logger.Info("gRPC server stopped", zap.Error(grpcServer.Serve(lis)))
}, func(ctx context.Context) error {
grpcServer.GracefulStop()
logger.Info("GRPC server shutdown")
return nil
}
}
}
func initLogger(conf *config.Config) {
if conf.Silent {
logger = zap.NewNop()
return
}
var err error
if conf.Release {
logger, err = zap.NewProduction()
} else {
config := zap.NewDevelopmentConfig()
config.EncoderConfig.EncodeLevel = zapcore.CapitalColorLevelEncoder
if !conf.EnableDebug {
config.Level.SetLevel(zap.InfoLevel)
}
logger, err = config.Build()
}
if err != nil {
log.Fatalln("init logger failed ", err)
}
}
func prefork(envPool worker.EnvironmentPool, prefork int) {
if prefork <= 0 {
return
}
logger.Info("Create prefork containers", zap.Int("count", prefork))
m := make([]envexec.Environment, 0, prefork)
for i := 0; i < prefork; i++ {
e, err := envPool.Get()
if err != nil {
log.Fatalln("prefork environment failed ", err)
}
m = append(m, e)
}
for _, e := range m {
envPool.Put(e)
}
}
func initHTTPMux(conf *config.Config, work worker.Worker, fs filestore.FileStore, builderParam map[string]any) http.Handler {
var r *gin.Engine
if conf.Release {
gin.SetMode(gin.ReleaseMode)
}
r = gin.New()
r.Use(ginzap.Ginzap(logger, "", false))
r.Use(ginzap.RecoveryWithZap(logger, true))
// Metrics Handle
if conf.EnableMetrics {
initGinMetrics(r)
}
// Version handle
r.GET("/version", generateHandleVersion(conf, builderParam))
// Config handle
r.GET("/config", generateHandleConfig(conf, builderParam))
// Add auth token
if conf.AuthToken != "" {
r.Use(tokenAuth(conf.AuthToken))
logger.Info("Attach token auth", zap.String("token", conf.AuthToken))
}
// Rest Handle
cmdHandle := restexecutor.NewCmdHandle(work, conf.SrcPrefix, logger)
cmdHandle.Register(r)
fileHandle := restexecutor.NewFileHandle(fs)
fileHandle.Register(r)
// WebSocket Handle
wsHandle := wsexecutor.New(work, conf.SrcPrefix, logger)
wsHandle.Register(r)
return r
}
func initMonitorHTTPMux(conf *config.Config) http.Handler {
if !conf.EnableMetrics && !conf.EnableDebug {
return nil
}
mux := http.NewServeMux()
if conf.EnableMetrics {
mux.Handle("/metrics", promhttp.Handler())
}
if conf.EnableDebug {
initDebugRoute(mux)
}
return mux
}
func initDebugRoute(mux *http.ServeMux) {
mux.HandleFunc("/debug/pprof/", pprof.Index)
mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
mux.HandleFunc("/debug/pprof/trace", pprof.Trace)
}
func InterceptorLogger(l *zap.Logger) grpc_logging.Logger {
return grpc_logging.LoggerFunc(func(ctx context.Context, lvl grpc_logging.Level, msg string, fields ...any) {
f := make([]zap.Field, 0, len(fields)/2)
for i := 0; i < len(fields); i += 2 {
key := fields[i]
value := fields[i+1]
switch v := value.(type) {
case string:
f = append(f, zap.String(key.(string), v))
case int:
f = append(f, zap.Int(key.(string), v))
case bool:
f = append(f, zap.Bool(key.(string), v))
default:
f = append(f, zap.Any(key.(string), v))
}
}
logger := l.WithOptions(zap.AddCallerSkip(1)).With(f...)
switch lvl {
case grpc_logging.LevelDebug:
logger.Debug(msg)
case grpc_logging.LevelInfo:
logger.Info(msg)
case grpc_logging.LevelWarn:
logger.Warn(msg)
case grpc_logging.LevelError:
logger.Error(msg)
default:
panic(fmt.Sprintf("unknown level %v", lvl))
}
})
}
func newGRPCServer(conf *config.Config, esServer pb.ExecutorServer) *grpc.Server {
prom := grpc_prometheus.NewServerMetrics(grpc_prometheus.WithServerHandlingTimeHistogram())
grpclog.SetLoggerV2(zapgrpc.NewLogger(logger))
streamMiddleware := []grpc.StreamServerInterceptor{
prom.StreamServerInterceptor(),
grpc_logging.StreamServerInterceptor(InterceptorLogger(logger)),
grpc_recovery.StreamServerInterceptor(),
}
unaryMiddleware := []grpc.UnaryServerInterceptor{
prom.UnaryServerInterceptor(),
grpc_logging.UnaryServerInterceptor(InterceptorLogger(logger)),
grpc_recovery.UnaryServerInterceptor(),
}
if conf.AuthToken != "" {
authFunc := grpcTokenAuth(conf.AuthToken)
streamMiddleware = append(streamMiddleware, grpc_auth.StreamServerInterceptor(authFunc))
unaryMiddleware = append(unaryMiddleware, grpc_auth.UnaryServerInterceptor(authFunc))
}
grpcServer := grpc.NewServer(
grpc.ChainStreamInterceptor(streamMiddleware...),
grpc.ChainUnaryInterceptor(unaryMiddleware...),
grpc.MaxRecvMsgSize(int(conf.GRPCMsgSize.Byte())),
)
pb.RegisterExecutorServer(grpcServer, esServer)
prometheus.MustRegister(prom)
return grpcServer
}
func initGinMetrics(r *gin.Engine) {
p := ginprometheus.NewWithConfig(ginprometheus.Config{
Subsystem: "gin",
DisableBodyReading: true,
})
p.ReqCntURLLabelMappingFn = func(c *gin.Context) string {
return c.FullPath()
}
r.Use(p.HandlerFunc())
}
func tokenAuth(token string) gin.HandlerFunc {
const bearer = "Bearer "
return func(c *gin.Context) {
reqToken := c.GetHeader("Authorization")
if strings.HasPrefix(reqToken, bearer) && reqToken[len(bearer):] == token {
c.Next()
return
}
c.AbortWithStatus(http.StatusUnauthorized)
}
}
func grpcTokenAuth(token string) func(context.Context) (context.Context, error) {
return func(ctx context.Context) (context.Context, error) {
reqToken, err := grpc_auth.AuthFromMD(ctx, "bearer")
if err != nil {
return nil, err
}
if reqToken != token {
return nil, status.Error(codes.Unauthenticated, "invalid auth token")
}
return ctx, nil
}
}
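// Client-side sketch for the bearer scheme shared by tokenAuth and
// grpcTokenAuth (address and token are placeholders; /file is one of the
// routes registered after the auth middleware):
//
//	req, _ := http.NewRequest(http.MethodGet, "http://localhost:5050/file", nil)
//	req.Header.Set("Authorization", "Bearer "+token) // must equal conf.AuthToken
//	resp, err := http.DefaultClient.Do(req)          // 401 when the token differs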
func newFileStore(conf *config.Config) (filestore.FileStore, func() error) {
const timeoutCheckInterval = 15 * time.Second
var cleanUp func() error
var fs filestore.FileStore
if conf.Dir == "" {
if runtime.GOOS == "linux" {
conf.Dir = "/dev/shm"
} else {
conf.Dir = os.TempDir()
}
var err error
conf.Dir = filepath.Join(conf.Dir, "go-judge")
err = os.Mkdir(conf.Dir, os.ModePerm)
if err != nil && !errors.Is(err, os.ErrExist) {
logger.Fatal("Failed to create file store default dir", zap.Error(err))
}
cleanUp = func() error {
return os.RemoveAll(conf.Dir)
}
}
os.MkdirAll(conf.Dir, 0o755)
fs = filestore.NewFileLocalStore(conf.Dir)
if conf.EnableMetrics {
fs = newMetricsFileStore(fs)
}
if conf.FileTimeout > 0 {
fs = filestore.NewTimeout(fs, conf.FileTimeout, timeoutCheckInterval)
}
return fs, cleanUp
}
func newEnvBuilder(conf *config.Config) (pool.EnvBuilder, map[string]any) {
b, param, err := env.NewBuilder(env.Config{
ContainerInitPath: conf.ContainerInitPath,
MountConf: conf.MountConf,
TmpFsParam: conf.TmpFsParam,
NetShare: conf.NetShare,
CgroupPrefix: conf.CgroupPrefix,
Cpuset: conf.Cpuset,
ContainerCredStart: conf.ContainerCredStart,
EnableCPURate: conf.EnableCPURate,
CPUCfsPeriod: conf.CPUCfsPeriod,
SeccompConf: conf.SeccompConf,
NoFallback: conf.NoFallback,
}, logger)
if err != nil {
logger.Fatal("create environment builder failed ", zap.Error(err))
}
if conf.EnableMetrics {
b = &metricsEnvBuilder{b}
}
return b, param
}
func newEnvPool(b pool.EnvBuilder, enableMetrics bool) worker.EnvironmentPool {
p := pool.NewPool(b)
if enableMetrics {
p = &metricsEnvPool{p}
}
return p
}
func newWorker(conf *config.Config, envPool worker.EnvironmentPool, fs filestore.FileStore) worker.Worker {
w := worker.New(worker.Config{
FileStore: fs,
EnvironmentPool: envPool,
Parallelism: conf.Parallelism,
WorkDir: conf.Dir,
TimeLimitTickInterval: conf.TimeLimitCheckerInterval,
ExtraMemoryLimit: *conf.ExtraMemoryLimit,
OutputLimit: *conf.OutputLimit,
CopyOutLimit: *conf.CopyOutLimit,
OpenFileLimit: uint64(conf.OpenFileLimit),
ExecObserver: execObserve,
})
if conf.EnableMetrics {
w = newMetricsWorker(w)
}
return w
}
func newForceGCWorker(conf *config.Config) {
go func() {
ticker := time.NewTicker(conf.ForceGCInterval)
for {
var mem runtime.MemStats
runtime.ReadMemStats(&mem)
if mem.HeapInuse > uint64(*conf.ForceGCTarget) {
logger.Info("Force GC as heap_in_use > target",
zap.Stringer("heap_in_use", envexec.Size(mem.HeapInuse)),
zap.Stringer("target", *conf.ForceGCTarget))
runtime.GC()
debug.FreeOSMemory()
}
<-ticker.C
}
}()
}
func generateHandleVersion(_ *config.Config, _ map[string]any) func(*gin.Context) {
return func(c *gin.Context) {
c.JSON(http.StatusOK, gin.H{
"buildVersion": version.Version,
"goVersion": runtime.Version(),
"platform": runtime.GOARCH,
"os": runtime.GOOS,
"copyOutOptional": true,
"pipeProxy": true,
"symlink": true,
"addressSpaceLimit": true,
"stream": true,
"procPeak": true,
"copyOutTruncate": true,
})
}
}
func generateHandleConfig(conf *config.Config, builderParam map[string]any) func(*gin.Context) {
return func(c *gin.Context) {
c.JSON(http.StatusOK, gin.H{
"copyOutOptional": true,
"pipeProxy": true,
"symlink": true,
"addressSpaceLimit": true,
"stream": true,
"procPeak": true,
"copyOutTruncate": true,
"fileStorePath": conf.Dir,
"runnerConfig": builderParam,
})
}
}
func isManagedByPM2() bool {
// List of environment variables that pm2 typically sets.
pm2EnvVars := []string{
"PM2_HOME",
"PM2_JSON_PROCESSING",
"NODE_APP_INSTANCE",
}
for _, v := range pm2EnvVars {
if os.Getenv(v) != "" {
return true
}
}
return false
}

266
cmd/go-judge/metrics.go Normal file

@ -0,0 +1,266 @@
package main
import (
"os"
"sync"
"github.com/criyle/go-judge/env/pool"
"github.com/criyle/go-judge/envexec"
"github.com/criyle/go-judge/filestore"
"github.com/criyle/go-judge/worker"
"github.com/prometheus/client_golang/prometheus"
)
const (
metricsNamespace = "go_judge"
execSubsystem = "exec"
filestoreSubsystem = "file"
environmentSubsystem = "environment"
workerSubsystem = "worker"
)
var (
// 1ms -> 100s
timeBuckets = []float64{
0.001, 0.002, 0.005, 0.010, 0.025, 0.050, 0.1, 0.2,
0.4, 0.8, 1.0, 2, 5, 10, 20, 50, 100,
}
// 4k (1<<12) -> 4g (1<<32)
memoryBucket = prometheus.ExponentialBuckets(1<<12, 2, 21)
// 256 byte (1<<8) -> 256m (1<<28)
fileSizeBucket = prometheus.ExponentialBuckets(1<<8, 2, 20)
execErrorCount = prometheus.NewCounter(prometheus.CounterOpts{
Namespace: metricsNamespace,
Subsystem: execSubsystem,
Name: "error_count",
Help: "Number of exec query returns error",
})
execTimeHist = prometheus.NewHistogramVec(prometheus.HistogramOpts{
Namespace: metricsNamespace,
Subsystem: execSubsystem,
Name: "time_seconds",
Help: "Histogram for the command execution time",
Buckets: timeBuckets,
}, []string{"status"})
execMemHist = prometheus.NewHistogramVec(prometheus.HistogramOpts{
Namespace: metricsNamespace,
Subsystem: execSubsystem,
Name: "memory_bytes",
Help: "Histogram for the command execution max memory",
Buckets: memoryBucket,
}, []string{"status"})
fsSizeHist = prometheus.NewHistogram(prometheus.HistogramOpts{
Namespace: metricsNamespace,
Subsystem: filestoreSubsystem,
Name: "size_bytes",
Help: "Histogram for the file size created in the file store",
Buckets: fileSizeBucket,
})
fsCurrentTotalCount = prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: metricsNamespace,
Subsystem: filestoreSubsystem,
Name: "current_bytes_count",
Help: "Total number of current files in the file store",
})
fsCurrentTotalSize = prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: metricsNamespace,
Subsystem: filestoreSubsystem,
Name: "current_bytes_sum",
Help: "Total size of current files in the file store",
})
envCreated = prometheus.NewCounter(prometheus.CounterOpts{
Namespace: metricsNamespace,
Subsystem: environmentSubsystem,
Name: "count",
Help: "Total number of environment build by environment builder",
})
envInUse = prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: metricsNamespace,
Subsystem: environmentSubsystem,
Name: "current_count",
Help: "Total number of environment currently in use",
})
workerQueue = prometheus.NewDesc(
prometheus.BuildFQName(metricsNamespace, workerSubsystem, "queue_count"),
"Number of requests waiting in worker queue", nil, nil,
)
workerRunning = prometheus.NewDesc(
prometheus.BuildFQName(metricsNamespace, workerSubsystem, "running_count"),
"Number of request running by workers", nil, nil,
)
)
func init() {
prometheus.MustRegister(execErrorCount)
prometheus.MustRegister(execTimeHist)
prometheus.MustRegister(execMemHist)
prometheus.MustRegister(fsSizeHist, fsCurrentTotalCount, fsCurrentTotalSize)
prometheus.MustRegister(envCreated, envInUse)
}
func execObserve(res worker.Response) {
if res.Error != nil {
execErrorCount.Inc()
}
for _, r := range res.Results {
status := r.Status.String()
time := r.Time.Seconds()
memory := float64(r.Memory)
execTimeHist.WithLabelValues(status).Observe(time)
execMemHist.WithLabelValues(status).Observe(memory)
}
}
var _ filestore.FileStore = &metricsFileStore{}
type metricsFileStore struct {
mu sync.Mutex
filestore.FileStore
fileSize map[string]int64
}
func newMetricsFileStore(fs filestore.FileStore) filestore.FileStore {
store := &metricsFileStore{
FileStore: fs,
fileSize: make(map[string]int64),
}
fi := store.List()
for id := range fi {
_, file := store.Get(id)
if file == nil {
continue
}
if f, ok := file.(*envexec.FileInput); ok {
info, err := os.Stat(f.Path)
if err != nil {
continue
}
store.fileSize[id] = info.Size()
sf := float64(info.Size())
fsSizeHist.Observe(sf)
fsCurrentTotalSize.Add(sf)
fsCurrentTotalCount.Inc()
}
}
return store
}
func (m *metricsFileStore) Add(name, path string) (string, error) {
id, err := m.FileStore.Add(name, path)
if err != nil {
return "", err
}
fi, err := os.Stat(path)
if err != nil {
return id, nil
}
m.mu.Lock()
defer m.mu.Unlock()
s := fi.Size()
m.fileSize[id] = s
sf := float64(s)
fsSizeHist.Observe(sf)
fsCurrentTotalSize.Add(sf)
fsCurrentTotalCount.Inc()
return id, nil
}
func (m *metricsFileStore) Remove(id string) bool {
success := m.FileStore.Remove(id)
m.mu.Lock()
defer m.mu.Unlock()
s, ok := m.fileSize[id]
if !ok {
return success
}
delete(m.fileSize, id)
sf := float64(s)
fsCurrentTotalSize.Sub(sf)
fsCurrentTotalCount.Dec()
return success
}
var _ pool.EnvBuilder = &metricsEnvBuilder{}
type metricsEnvBuilder struct {
pool.EnvBuilder
}
func (b *metricsEnvBuilder) Build() (pool.Environment, error) {
e, err := b.EnvBuilder.Build()
if err != nil {
return nil, err
}
envCreated.Inc()
return e, nil
}
var _ worker.EnvironmentPool = &metricsEnvPool{}
type metricsEnvPool struct {
worker.EnvironmentPool
}
func (p *metricsEnvPool) Get() (envexec.Environment, error) {
e, err := p.EnvironmentPool.Get()
if err != nil {
return nil, err
}
envInUse.Inc()
return e, nil
}
func (p *metricsEnvPool) Put(env envexec.Environment) {
p.EnvironmentPool.Put(env)
envInUse.Dec()
}
var _ worker.Worker = &metricsWorker{}
var _ prometheus.Collector = &metricsWorker{}
type metricsWorker struct {
worker.Worker
}
// Collect implements prometheus.Collector.
func (m *metricsWorker) Collect(ch chan<- prometheus.Metric) {
s := m.Stat()
ch <- prometheus.MustNewConstMetric(
workerQueue, prometheus.GaugeValue, float64(s.Queue),
)
ch <- prometheus.MustNewConstMetric(
workerRunning, prometheus.GaugeValue, float64(s.Running),
)
}
// Describe implements prometheus.Collector.
func (m *metricsWorker) Describe(ch chan<- *prometheus.Desc) {
prometheus.DescribeByCollect(m, ch)
}
func newMetricsWorker(w worker.Worker) worker.Worker {
rt := &metricsWorker{w}
prometheus.MustRegister(rt)
return rt
}


@ -0,0 +1,115 @@
package main
import (
"path/filepath"
"time"
"github.com/criyle/go-judge/cmd/go-judge/config"
"github.com/criyle/go-sandbox/pkg/cgroup"
"github.com/prometheus/client_golang/prometheus"
)
const cgroupSubsystem = "cgroup"
var _ prometheus.Collector = &cgroupMetrics{}
type cgroupMetrics struct {
cgroup cgroup.Cgroup
cgroupCPU *prometheus.Desc
cgroupMemory *prometheus.Desc
cgroupMaxMemory *prometheus.Desc
}
// Collect implements prometheus.Collector.
func (c *cgroupMetrics) Collect(ch chan<- prometheus.Metric) {
if u, err := c.cgroup.CPUUsage(); err == nil {
ch <- prometheus.MustNewConstMetric(
c.cgroupCPU, prometheus.CounterValue, time.Duration(u).Seconds(),
)
}
if m, err := c.cgroup.MemoryUsage(); err == nil {
ch <- prometheus.MustNewConstMetric(
c.cgroupMemory, prometheus.GaugeValue, float64(m),
)
}
if m, err := c.cgroup.MemoryMaxUsage(); err == nil {
ch <- prometheus.MustNewConstMetric(
c.cgroupMaxMemory, prometheus.GaugeValue, float64(m),
)
}
}
// Describe implements prometheus.Collector.
func (c *cgroupMetrics) Describe(ch chan<- *prometheus.Desc) {
prometheus.DescribeByCollect(c, ch)
}
func newCgroupMetrics(cg cgroup.Cgroup, label string) *cgroupMetrics {
cgroupCPU := prometheus.NewDesc(
prometheus.BuildFQName(metricsNamespace, cgroupSubsystem, "cpu_seconds"),
"CPU usage of the cgroup", nil, prometheus.Labels{"type": label},
)
cgroupMemory := prometheus.NewDesc(
prometheus.BuildFQName(metricsNamespace, cgroupSubsystem, "memory_bytes"),
"Memory usage of the cgroup", nil, prometheus.Labels{"type": label},
)
cgroupMaxMemory := prometheus.NewDesc(
prometheus.BuildFQName(metricsNamespace, cgroupSubsystem, "memory_max_bytes"),
"Maximum memory usage of the cgroup", nil, prometheus.Labels{"type": label},
)
rt := &cgroupMetrics{
cgroup: cg,
cgroupCPU: cgroupCPU,
cgroupMemory: cgroupMemory,
cgroupMaxMemory: cgroupMaxMemory,
}
prometheus.MustRegister(rt)
return rt
}
func initCgroupMetrics(conf *config.Config, param map[string]any) {
if !conf.EnableMetrics {
return
}
t, ok := param["cgroupType"]
if !ok {
return
}
ct, ok := t.(int)
if !ok {
return
}
if ct != cgroup.TypeV1 && ct != cgroup.TypeV2 {
return
}
prefix, err := cgroup.GetCurrentCgroupPrefix()
if err != nil {
return
}
// current cgroup is xxx/api, get the dir
prefix = filepath.Dir(prefix)
control, err := cgroup.GetAvailableControllerWithPrefix(prefix)
if err != nil {
return
}
cg, err := cgroup.New(prefix, control)
if err != nil {
return
}
newCgroupMetrics(cg, "all")
apiCg, err := cg.New("api")
if err != nil {
return
}
newCgroupMetrics(apiCg, "controller")
containersCg, err := cgroup.New(filepath.Join(prefix, "containers"), control)
if err != nil {
return
}
newCgroupMetrics(containersCg, "containers")
}


@ -0,0 +1,8 @@
//go:build !linux
package main
import "github.com/criyle/go-judge/cmd/go-judge/config"
func initCgroupMetrics(conf *config.Config, param map[string]any) {
}

416
cmd/go-judge/model/model.go Normal file

@ -0,0 +1,416 @@
package model
import (
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/criyle/go-judge/envexec"
"github.com/criyle/go-judge/worker"
)
// FileError defines the location, file name and the detailed message for a failed file operation
type FileError = envexec.FileError
// FileErrorType defines the location that file operation fails
type FileErrorType = envexec.FileErrorType
// CmdFile defines a file drawn from multiple sources, including local, memory, cached, or a pipe collector
type CmdFile struct {
Src *string `json:"src"`
Content *string `json:"content"`
FileID *string `json:"fileId"`
Name *string `json:"name"`
Max *int64 `json:"max"`
Symlink *string `json:"symlink"`
StreamIn bool `json:"streamIn"`
StreamOut bool `json:"streamOut"`
Pipe bool `json:"pipe"`
}
// Cmd defines the command and limits used to start a program in envexec
type Cmd struct {
Args []string `json:"args"`
Env []string `json:"env,omitempty"`
Files []*CmdFile `json:"files,omitempty"`
CPULimit uint64 `json:"cpuLimit"`
RealCPULimit uint64 `json:"realCpuLimit"`
ClockLimit uint64 `json:"clockLimit"`
MemoryLimit uint64 `json:"memoryLimit"`
StackLimit uint64 `json:"stackLimit"`
ProcLimit uint64 `json:"procLimit"`
CPURateLimit uint64 `json:"cpuRateLimit"`
CPUSetLimit string `json:"cpuSetLimit"`
CopyIn map[string]CmdFile `json:"copyIn"`
CopyOut []string `json:"copyOut"`
CopyOutCached []string `json:"copyOutCached"`
CopyOutMax uint64 `json:"copyOutMax"`
CopyOutDir string `json:"copyOutDir"`
CopyOutTruncate bool `json:"copyOutTruncate"`
TTY bool `json:"tty,omitempty"`
StrictMemoryLimit bool `json:"strictMemoryLimit"`
DataSegmentLimit bool `json:"dataSegmentLimit"`
AddressSpaceLimit bool `json:"addressSpaceLimit"`
}
// PipeIndex defines indexing for a pipe fd
type PipeIndex struct {
Index int `json:"index"`
Fd int `json:"fd"`
}
// PipeMap defines the in / out pipe mapping between programs
type PipeMap struct {
In PipeIndex `json:"in"`
Out PipeIndex `json:"out"`
Name string `json:"name"`
Max int64 `json:"max"`
Proxy bool `json:"proxy"`
}
// Request defines single worker request
type Request struct {
RequestID string `json:"requestId"`
Cmd []Cmd `json:"cmd"`
PipeMapping []PipeMap `json:"pipeMapping"`
}
// Status offers JSON marshal for envexec.Status
type Status envexec.Status
// String converts status to string
func (s Status) String() string {
return envexec.Status(s).String()
}
// MarshalJSON converts the status into a quoted string
func (s Status) MarshalJSON() ([]byte, error) {
return []byte("\"" + envexec.Status(s).String() + "\""), nil
}
// UnmarshalJSON converts a quoted string back into a status
func (s *Status) UnmarshalJSON(b []byte) error {
str := string(b)
if len(str) < 2 || str[0] != '"' || str[len(str)-1] != '"' {
return fmt.Errorf("invalid status string: %s", str)
}
// remove quotes
v, err := envexec.StringToStatus(str[1 : len(str)-1])
if err != nil {
return err
}
*s = Status(v)
return nil
}
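// Round-trip sketch: the JSON form is the quoted string produced by
// envexec.Status.String, so (assuming "Accepted" is a valid status name)
//
//	var s Status
//	_ = json.Unmarshal([]byte(`"Accepted"`), &s)
//	b, _ := json.Marshal(s) // b == []byte(`"Accepted"`)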
// Result defines single command result
type Result struct {
Status Status `json:"status"`
ExitStatus int `json:"exitStatus"`
Error string `json:"error,omitempty"`
Time uint64 `json:"time"`
Memory uint64 `json:"memory"`
RunTime uint64 `json:"runTime"`
ProcPeak uint64 `json:"procPeak,omitempty"`
Files map[string]string `json:"files,omitempty"`
FileIDs map[string]string `json:"fileIds,omitempty"`
FileError []FileError `json:"fileError,omitempty"`
files []string
Buffs map[string][]byte `json:"-"`
}
func (r Result) String() string {
type Result struct {
Status Status
ExitStatus int
Error string
Time time.Duration
RunTime time.Duration
ProcPeak uint64
Memory envexec.Size
Files map[string]string
FileIDs map[string]string
FileError []FileError
}
d := Result{
Status: r.Status,
ExitStatus: r.ExitStatus,
Error: r.Error,
Time: time.Duration(r.Time),
RunTime: time.Duration(r.RunTime),
Memory: envexec.Size(r.Memory),
ProcPeak: r.ProcPeak,
Files: make(map[string]string),
FileIDs: r.FileIDs,
FileError: r.FileError,
}
for k, v := range r.Files {
d.Files[k] = "len:" + strconv.Itoa(len(v))
}
return fmt.Sprintf("%+v", d)
}
// Response defines worker response for single request
type Response struct {
RequestID string `json:"requestId"`
Results []Result `json:"results"`
ErrorMsg string `json:"error,omitempty"`
mmap bool
}
// Close needs to be called when mmap is specified to be true
func (r *Response) Close() {
if !r.mmap {
return
}
for _, res := range r.Results {
res.Close()
}
}
// Close needs to be called when mmap is specified to be true
func (r *Result) Close() {
// remove temporary files
for _, f := range r.files {
os.Remove(f)
}
// remove potential mmap
for _, b := range r.Buffs {
releaseByte(b)
}
}
// ConvertResponse converts a worker response into a model response
func ConvertResponse(r worker.Response, mmap bool) (ret Response, err error) {
// in error case, release all resources
defer func() {
if err != nil {
for _, r := range ret.Results {
r.Close()
}
for _, r := range r.Results {
for _, f := range r.Files {
f.Close()
os.Remove(f.Name())
}
}
}
// if no mmap required, close all files
if !mmap {
for _, r := range ret.Results {
r.Close()
}
}
}()
ret = Response{
RequestID: r.RequestID,
Results: make([]Result, 0, len(r.Results)),
mmap: mmap,
}
for _, r := range r.Results {
res, err := convertResult(r, mmap)
if err != nil {
return ret, err
}
ret.Results = append(ret.Results, res)
}
if r.Error != nil {
ret.ErrorMsg = r.Error.Error()
}
return ret, nil
}
// ConvertRequest converts a JSON request into a worker request
func ConvertRequest(r *Request, srcPrefix []string) (*worker.Request, error) {
req := &worker.Request{
RequestID: r.RequestID,
Cmd: make([]worker.Cmd, 0, len(r.Cmd)),
PipeMapping: make([]worker.PipeMap, 0, len(r.PipeMapping)),
}
for _, c := range r.Cmd {
wc, err := convertCmd(c, srcPrefix)
if err != nil {
return nil, err
}
req.Cmd = append(req.Cmd, wc)
}
for _, p := range r.PipeMapping {
req.PipeMapping = append(req.PipeMapping, convertPipe(p))
}
return req, nil
}
func convertResult(r worker.Result, mmap bool) (Result, error) {
res := Result{
Status: Status(r.Status),
ExitStatus: r.ExitStatus,
Error: r.Error,
Time: uint64(r.Time),
RunTime: uint64(r.RunTime),
Memory: uint64(r.Memory),
ProcPeak: r.ProcPeak,
FileIDs: r.FileIDs,
FileError: r.FileError,
}
if r.Files != nil {
res.Files = make(map[string]string)
res.Buffs = make(map[string][]byte)
for k, f := range r.Files {
b, err := fileToByte(f, mmap)
if err != nil {
return res, err
}
res.Files[k] = byteArrayToString(b)
res.files = append(res.files, f.Name())
res.Buffs[k] = b
}
}
return res, nil
}
func convertPipe(p PipeMap) worker.PipeMap {
return worker.PipeMap{
In: worker.PipeIndex{
Index: p.In.Index,
Fd: p.In.Fd,
},
Out: worker.PipeIndex{
Index: p.Out.Index,
Fd: p.Out.Fd,
},
Proxy: p.Proxy,
Name: p.Name,
Limit: worker.Size(p.Max),
}
}
func convertCmd(c Cmd, srcPrefix []string) (worker.Cmd, error) {
clockLimit := c.ClockLimit
if c.RealCPULimit > 0 {
clockLimit = c.RealCPULimit
}
w := worker.Cmd{
Args: c.Args,
Env: c.Env,
Files: make([]worker.CmdFile, 0, len(c.Files)),
TTY: c.TTY,
CPULimit: time.Duration(c.CPULimit),
ClockLimit: time.Duration(clockLimit),
MemoryLimit: envexec.Size(c.MemoryLimit),
StackLimit: envexec.Size(c.StackLimit),
ProcLimit: c.ProcLimit,
CPURateLimit: c.CPURateLimit,
CPUSetLimit: c.CPUSetLimit,
DataSegmentLimit: c.DataSegmentLimit || c.StrictMemoryLimit,
AddressSpaceLimit: c.AddressSpaceLimit,
CopyOut: convertCopyOut(c.CopyOut),
CopyOutCached: convertCopyOut(c.CopyOutCached),
CopyOutMax: c.CopyOutMax,
CopyOutDir: c.CopyOutDir,
CopyOutTruncate: c.CopyOutTruncate,
}
for _, f := range c.Files {
cf, err := convertCmdFile(f, srcPrefix)
if err != nil {
return w, err
}
w.Files = append(w.Files, cf)
}
if c.CopyIn != nil {
w.CopyIn = make(map[string]worker.CmdFile)
w.Symlinks = make(map[string]string)
for k, f := range c.CopyIn {
if f.Symlink != nil {
w.Symlinks[k] = *f.Symlink
continue
}
cf, err := convertCmdFile(&f, srcPrefix)
if err != nil {
return w, err
}
w.CopyIn[k] = cf
}
}
return w, nil
}
func convertCmdFile(f *CmdFile, srcPrefix []string) (worker.CmdFile, error) {
switch {
case f == nil:
return nil, nil
case f.Src != nil:
if len(srcPrefix) != 0 {
ok, err := CheckPathPrefixes(*f.Src, srcPrefix)
if err != nil {
return nil, err
}
if !ok {
return nil, fmt.Errorf("file (%s) does not under (%s)", *f.Src, srcPrefix)
}
}
return &worker.LocalFile{Src: *f.Src}, nil
case f.Content != nil:
return &worker.MemoryFile{Content: strToBytes(*f.Content)}, nil
case f.FileID != nil:
return &worker.CachedFile{FileID: *f.FileID}, nil
case f.Max != nil && f.Name != nil:
return &worker.Collector{Name: *f.Name, Max: envexec.Size(*f.Max), Pipe: f.Pipe}, nil
default:
return nil, fmt.Errorf("file type is not valid for cmd: %v", f)
}
}
// CheckPathPrefixes ensures the path is allowed by at least one of the prefixes
func CheckPathPrefixes(path string, prefixes []string) (bool, error) {
for _, p := range prefixes {
ok, err := checkPathPrefix(path, p)
if err != nil {
return false, err
}
if ok {
return true, nil
}
}
return false, nil
}
func checkPathPrefix(path, prefix string) (bool, error) {
if filepath.IsAbs(path) {
return strings.HasPrefix(filepath.Clean(path), prefix), nil
}
wd, err := os.Getwd()
if err != nil {
return false, err
}
return strings.HasPrefix(filepath.Join(wd, path), prefix), nil
}
const optionalSuffix = "?"
func convertCopyOut(copyOut []string) []worker.CmdCopyOutFile {
rt := make([]worker.CmdCopyOutFile, 0, len(copyOut))
for _, n := range copyOut {
if strings.HasSuffix(n, optionalSuffix) {
rt = append(rt, worker.CmdCopyOutFile{
Name: strings.TrimSuffix(n, optionalSuffix),
Optional: true,
})
continue
}
rt = append(rt, worker.CmdCopyOutFile{
Name: n,
})
}
return rt
}
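// For example, the trailing "?" marks an optional entry:
//
//	out := convertCopyOut([]string{"stdout", "stderr?"})
//	// out[0] == worker.CmdCopyOutFile{Name: "stdout"}
//	// out[1] == worker.CmdCopyOutFile{Name: "stderr", Optional: true}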


@ -0,0 +1,229 @@
package model
import (
"encoding/json"
"os"
"path/filepath"
"testing"
"time"
"github.com/criyle/go-judge/worker"
)
func TestStatus_MarshalUnmarshalJSON(t *testing.T) {
type wrap struct {
Status Status `json:"status"`
}
orig := wrap{Status: 1}
data, err := json.Marshal(orig)
if err != nil {
t.Fatalf("Marshal error: %v", err)
}
var got wrap
if err := json.Unmarshal(data, &got); err != nil {
t.Fatalf("Unmarshal error: %v", err)
}
if got.Status != orig.Status {
t.Errorf("got %v, want %v", got.Status, orig.Status)
}
}
func TestStatus_UnmarshalJSON_Invalid(t *testing.T) {
var s Status
err := s.UnmarshalJSON([]byte(`"not_a_status"`))
if err == nil {
t.Error("expected error for invalid status string")
}
}
func TestConvertCopyOut(t *testing.T) {
in := []string{"foo.txt", "bar.txt?"}
out := convertCopyOut(in)
if len(out) != 2 {
t.Fatalf("expected 2, got %d", len(out))
}
if out[0].Name != "foo.txt" || out[0].Optional {
t.Errorf("unexpected: %+v", out[0])
}
if out[1].Name != "bar.txt" || !out[1].Optional {
t.Errorf("unexpected: %+v", out[1])
}
}
func TestCheckPathPrefixes(t *testing.T) {
tmp := t.TempDir()
abs := filepath.Join(tmp, "file.txt")
os.WriteFile(abs, []byte("x"), 0644)
ok, err := CheckPathPrefixes(abs, []string{tmp})
if err != nil {
t.Fatalf("CheckPathPrefixes error: %v", err)
}
if !ok {
t.Errorf("expected true for prefix match")
}
ok, err = CheckPathPrefixes(abs, []string{"/not/a/prefix"})
if err != nil {
t.Fatalf("CheckPathPrefixes error: %v", err)
}
if ok {
t.Errorf("expected false for non-matching prefix")
}
}
func TestConvertCmdFile_Local(t *testing.T) {
src := "/tmp/foo"
f := &CmdFile{Src: &src}
_, err := convertCmdFile(f, nil)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
}
func TestConvertCmdFile_Content(t *testing.T) {
content := "abc"
f := &CmdFile{Content: &content}
cf, err := convertCmdFile(f, nil)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if cf == nil {
t.Error("expected non-nil CmdFile")
}
}
func TestConvertCmdFile_FileID(t *testing.T) {
id := "id"
f := &CmdFile{FileID: &id}
cf, err := convertCmdFile(f, nil)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if cf == nil {
t.Error("expected non-nil CmdFile")
}
}
func TestConvertCmdFile_Collector(t *testing.T) {
name := "out"
max := int64(123)
f := &CmdFile{Name: &name, Max: &max}
cf, err := convertCmdFile(f, nil)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if cf == nil {
t.Error("expected non-nil CmdFile")
}
}
func TestConvertCmdFile_Invalid(t *testing.T) {
f := &CmdFile{}
_, err := convertCmdFile(f, nil)
if err == nil {
t.Error("expected error for invalid CmdFile")
}
}
func TestResult_String(t *testing.T) {
r := Result{
Status: 1,
ExitStatus: 0,
Error: "",
Time: uint64(time.Second),
RunTime: uint64(time.Second),
Memory: 1024,
Files: map[string]string{"foo": "bar"},
}
s := r.String()
if s == "" {
t.Error("expected non-empty string")
}
}
func TestConvertPipe(t *testing.T) {
p := PipeMap{
In: PipeIndex{Index: 1, Fd: 2},
Out: PipeIndex{Index: 3, Fd: 4},
Name: "pipe",
Max: 100,
Proxy: true,
}
wp := convertPipe(p)
if wp.In.Index != 1 || wp.Out.Fd != 4 || wp.Name != "pipe" || wp.Limit != 100 {
t.Errorf("unexpected convertPipe result: %+v", wp)
}
}
func TestConvertRequest_Basic(t *testing.T) {
src := "/tmp/foo"
content := "abc"
fileID := "id"
name := "out"
max := int64(123)
copyOut := []string{"result.txt", "log.txt?"}
req := &Request{
Cmd: []Cmd{{
Args: []string{"echo", "hello"},
Files: []*CmdFile{{Src: &src}, {Content: &content}, {FileID: &fileID}, {Name: &name, Max: &max}},
CopyOut: copyOut,
CPULimit: uint64(1000 * time.Millisecond),
MemoryLimit: 1024,
}},
}
workerReq, err := ConvertRequest(req, []string{"/tmp"})
if err != nil {
t.Fatalf("ConvertRequest error: %v", err)
}
if len(workerReq.Cmd[0].Files) != 4 {
t.Errorf("expected 4 files, got %d", len(workerReq.Cmd[0].Files))
}
if len(workerReq.Cmd[0].CopyOut) != 2 {
t.Errorf("expected 2 copyOut, got %d", len(workerReq.Cmd[0].CopyOut))
}
if workerReq.Cmd[0].CPULimit != 1000*time.Millisecond {
t.Errorf("unexpected CPULimit: %v", workerReq.Cmd[0].CPULimit)
}
if workerReq.Cmd[0].MemoryLimit != 1024 {
t.Errorf("unexpected MemoryLimit: %v", workerReq.Cmd[0].MemoryLimit)
}
}
func TestConvertRequest_InvalidFile(t *testing.T) {
req := &Request{
Cmd: []Cmd{
{
Files: []*CmdFile{{}}, // invalid
},
},
}
_, err := ConvertRequest(req, nil)
if err == nil {
t.Error("expected error for invalid CmdFile")
}
}
func TestConvertResponse_Basic(t *testing.T) {
res := worker.Response{
Results: []worker.Result{{
Status: 1,
ExitStatus: 0,
Error: "",
Time: 1000 * time.Millisecond,
RunTime: 900 * time.Millisecond,
Memory: 2048,
ProcPeak: 2,
Files: map[string]*os.File{},
FileError: []worker.FileError{{Name: "foo", Type: 1, Message: "err"}},
}},
}
resp, _ := ConvertResponse(res, false)
if resp.Results[0].Status != 1 {
t.Errorf("unexpected Status: %v", resp.Results[0].Status)
}
if resp.Results[0].Time != uint64(1000*time.Millisecond) {
t.Errorf("unexpected Time: %v", resp.Results[0].Time)
}
if len(resp.Results[0].FileError) != 1 {
t.Errorf("unexpected FileError: %+v", resp.Results[0].FileError)
}
}


@ -0,0 +1,7 @@
package model
import "unsafe"
func byteArrayToString(buf []byte) string {
return *(*string)(unsafe.Pointer(&buf))
}


@ -0,0 +1,30 @@
package model
import (
"io"
"os"
"unsafe"
)
func fileToByteGeneric(f *os.File) ([]byte, error) {
defer f.Close()
if _, err := f.Seek(0, 0); err != nil {
return nil, err
}
fi, err := f.Stat()
if err != nil {
return nil, err
}
s := fi.Size()
c := make([]byte, s)
if _, err := io.ReadFull(f, c); err != nil {
return nil, err
}
return c, nil
}
func strToBytes(s string) []byte {
return unsafe.Slice(unsafe.StringData(s), len(s))
}


@ -0,0 +1,38 @@
package model
import (
"os"
"syscall"
)
func fileToByte(f *os.File, mmap bool) ([]byte, error) {
if mmap {
return fileToByteMmap(f)
}
return fileToByteGeneric(f)
}
func fileToByteMmap(f *os.File) ([]byte, error) {
defer f.Close()
fi, err := f.Stat()
if err != nil {
return nil, err
}
s := fi.Size()
if s == 0 {
return []byte{}, nil
}
b, err := syscall.Mmap(int(f.Fd()), 0, int(s), syscall.PROT_READ, syscall.MAP_SHARED)
if err != nil {
return nil, err
}
return b, nil
}
func releaseByte(b []byte) {
if len(b) > 0 {
_ = syscall.Munmap(b)
}
}
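// Intended pairing (sketch, error handling trimmed): a buffer mapped by
// fileToByte(f, true) stays valid until releaseByte unmaps it, which is what
// Result.Close relies on for the mmap path.
//
//	buf, err := fileToByte(f, true) // f is closed inside
//	if err == nil {
//		defer releaseByte(buf) // munmap after the bytes are written out
//	}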


@ -0,0 +1,12 @@
//go:build !linux
package model
import "os"
func fileToByte(f *os.File, mmap bool) ([]byte, error) {
return fileToByteGeneric(f)
}
func releaseByte(b []byte) {
}


@ -0,0 +1,81 @@
package restexecutor
import (
"encoding/json"
"fmt"
"net/http"
"github.com/criyle/go-judge/cmd/go-judge/model"
"github.com/criyle/go-judge/worker"
"github.com/gin-gonic/gin"
"go.uber.org/zap"
)
type cmdHandle struct {
worker worker.Worker
srcPrefix []string
logger *zap.Logger
}
// NewCmdHandle creates a new command handle
func NewCmdHandle(worker worker.Worker, srcPrefix []string, logger *zap.Logger) Register {
return &cmdHandle{
worker: worker,
srcPrefix: srcPrefix,
logger: logger,
}
}
func (c *cmdHandle) Register(r *gin.Engine) {
// Run handle
r.POST("/run", c.handleRun)
}
func (c *cmdHandle) handleRun(ctx *gin.Context) {
var req model.Request
if err := ctx.ShouldBindJSON(&req); err != nil {
ctx.Error(err)
ctx.AbortWithStatusJSON(http.StatusBadRequest, err.Error())
return
}
if len(req.Cmd) == 0 {
ctx.AbortWithStatusJSON(http.StatusBadRequest, "no cmd provided")
return
}
r, err := model.ConvertRequest(&req, c.srcPrefix)
if err != nil {
ctx.Error(err)
ctx.AbortWithStatusJSON(http.StatusBadRequest, err.Error())
return
}
if ce := c.logger.Check(zap.DebugLevel, "request"); ce != nil {
ce.Write(zap.String("body", fmt.Sprintf("%+v", r)))
}
rtCh, _ := c.worker.Submit(ctx.Request.Context(), r)
rt := <-rtCh
if ce := c.logger.Check(zap.DebugLevel, "response"); ce != nil {
ce.Write(zap.String("body", fmt.Sprintf("%+v", rt)))
}
if rt.Error != nil {
ctx.Error(rt.Error)
ctx.AbortWithStatusJSON(http.StatusInternalServerError, rt.Error.Error())
return
}
// encode json directly to avoid allocation
ctx.Status(http.StatusOK)
ctx.Header("Content-Type", "application/json; charset=utf-8")
res, err := model.ConvertResponse(rt, true)
if err != nil {
ctx.Error(err)
ctx.AbortWithStatusJSON(http.StatusInternalServerError, err.Error())
return
}
defer res.Close()
if err := json.NewEncoder(ctx.Writer).Encode(res.Results); err != nil {
ctx.Error(err)
}
}


@ -0,0 +1,182 @@
package restexecutor
import (
"bytes"
"context"
"encoding/json"
"fmt"
"github.com/criyle/go-judge/cmd/go-judge/model"
"github.com/criyle/go-judge/envexec"
"github.com/criyle/go-judge/worker"
"github.com/gin-gonic/gin"
"go.uber.org/zap/zaptest"
"io"
"maps"
"net/http"
"net/http/httptest"
"slices"
"testing"
"time"
)
// mockWorker is a mock implementation of the worker.Worker interface
type mockWorker struct {
// The result to send back when Submit is called
Result worker.Result
worker.Worker
}
func (m *mockWorker) Submit(_ context.Context, req *worker.Request) (<-chan worker.Response, <-chan struct{}) {
// Mock implementation
rtCh := make(chan worker.Response, 1)
rtCh <- worker.Response{
RequestID: req.RequestID,
Results: []worker.Result{m.Result},
}
return rtCh, nil
}
// ptr is a helper function to create a pointer to a value
func ptr[T any](v T) *T {
return &v
}
// requestToReader converts a model.Request to an io.Reader
func requestToReader(req model.Request) io.Reader {
// Convert the request to JSON
data, err := json.Marshal(req)
if err != nil {
return nil
}
// Create a new reader from the JSON data
return io.NopCloser(bytes.NewReader(data))
}
// assertWorkerResultEqualsModelResult checks whether a worker.Result matches the corresponding model.Result
func assertWorkerResultEqualsModelResult(a worker.Result, b model.Result) error {
if a.Status.String() != b.Status.String() {
return fmt.Errorf("expected status %s, got %s", a.Status.String(), b.Status.String())
}
if a.ExitStatus != b.ExitStatus {
return fmt.Errorf("expected exit status %d, got %d", a.ExitStatus, b.ExitStatus)
}
if a.Error != b.Error {
return fmt.Errorf("expected error %s, got %s", a.Error, b.Error)
}
if a.Time != time.Duration(b.Time) {
return fmt.Errorf("expected time %s, got %s", a.Time, time.Duration(b.Time))
}
if a.Memory != worker.Size(b.Memory) {
return fmt.Errorf("expected memory %d, got %d", a.Memory, worker.Size(b.Memory))
}
if a.RunTime != time.Duration(b.RunTime) {
return fmt.Errorf("expected run time %s, got %s", a.RunTime, time.Duration(b.RunTime))
}
if a.ProcPeak != b.ProcPeak {
return fmt.Errorf("expected proc peak %d, got %d", a.ProcPeak, b.ProcPeak)
}
if !maps.Equal(a.FileIDs, b.FileIDs) {
return fmt.Errorf("expected file IDs %v, got %v", a.FileIDs, b.FileIDs)
}
if !slices.Equal(a.FileError, b.FileError) {
return fmt.Errorf("expected file errors %v, got %v", a.FileError, b.FileError)
}
return nil
}
// TestHandleRun tests the handleRun method of the cmdHandle
func TestHandleRun(t *testing.T) {
// Create a new Gin router
router := gin.Default()
// Create a mock worker
mockWorker := &mockWorker{
Result: worker.Result{
Status: envexec.StatusAccepted,
ExitStatus: 0,
Error: "",
Time: time.Millisecond * 30,
Memory: 32243712,
RunTime: time.Millisecond * 52,
FileIDs: map[string]string{
"a": "5LWIZAA45JHX4Y4Z",
"a.cc": "NOHPGGDTYQUFRSLJ",
},
},
}
// Create a logger
logger := zaptest.NewLogger(t)
// Create a new command handle
cmdHandle := NewCmdHandle(mockWorker, nil, logger)
cmdHandle.Register(router)
// Create a test request
req := model.Request{
RequestID: "qwq",
Cmd: []model.Cmd{
{
Args: []string{"/usr/bin/g++", "a.cc", "-o", "a"},
Env: []string{"PATH=/usr/bin:/bin"},
Files: []*model.CmdFile{
{
Content: ptr(""),
}, {
Name: ptr("stdout"),
Max: ptr(int64(10240)),
}, {
Name: ptr("stderr"),
Max: ptr(int64(10240)),
}},
CPULimit: 10000000000,
MemoryLimit: 104857600,
ProcLimit: 50,
CopyIn: map[string]model.CmdFile{
"a.cc": {
Content: ptr("#include <iostream>\nusing namespace std;\nint main() {\nint a, b;\ncin >> a >> b;\ncout << a + b << endl;\n}"),
},
},
CopyOut: []string{"stdout", "stderr"},
CopyOutCached: []string{"a.cc", "a"},
},
},
}
// Convert the request to a reader
requestBody := requestToReader(req)
// Create a test HTTP request
testReq := httptest.NewRequest("POST", "/run", requestBody)
// Set the request context
testReq = testReq.WithContext(context.Background())
// Set the request header
testReq.Header.Set("Content-Type", "application/json")
// Create a test HTTP recorder
recorder := httptest.NewRecorder()
// Serve the HTTP request
router.ServeHTTP(recorder, testReq)
// Check the response status code
if recorder.Code != http.StatusOK {
t.Fatalf("Expected status %d, got %d", 200, recorder.Code)
}
// Check the response body
responseBody := recorder.Body.String()
if responseBody == "" {
t.Fatalf("Expected non-empty response body, got empty")
}
// Print the response body
t.Logf("Response body: %s", responseBody)
// Check if the response is valid JSON
var response []model.Result
if err := json.Unmarshal([]byte(responseBody), &response); err != nil {
t.Fatalf("Failed to unmarshal response: %v", err)
}
if len(response) != 1 {
t.Fatalf("Expected 1 result, got %d", len(response))
}
// Check if the response matches the expected result
if err := assertWorkerResultEqualsModelResult(mockWorker.Result, response[0]); err != nil {
t.Fatalf("Expected result to match, but got error: %v", err)
}
}
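For reference, the same /run endpoint this test drives can be called from a plain HTTP client. The sketch below mirrors the test request in trimmed form (no Files or CopyIn, which a real compile needs; see TestHandleRun above); the server address is an assumption, not part of this change set.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"

	"github.com/criyle/go-judge/cmd/go-judge/model"
)

func main() {
	// Trimmed request; a full one also carries Files, CopyIn, CopyOut as in the test.
	req := model.Request{
		RequestID: "qwq",
		Cmd: []model.Cmd{{
			Args:        []string{"/usr/bin/g++", "a.cc", "-o", "a"},
			Env:         []string{"PATH=/usr/bin:/bin"},
			CPULimit:    10000000000,
			MemoryLimit: 104857600,
			ProcLimit:   50,
		}},
	}
	body, err := json.Marshal(req)
	if err != nil {
		panic(err)
	}
	// Assumed address of a running go-judge instance.
	resp, err := http.Post("http://localhost:5050/run", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	// The endpoint answers with one model.Result per command.
	var results []model.Result
	if err := json.NewDecoder(resp.Body).Decode(&results); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", results)
}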


@ -1,12 +1,13 @@
-package main
+package restexecutor
import (
"fmt"
-"io/ioutil"
+"io"
"mime"
"net/http"
-"path"
+"path/filepath"
+"github.com/criyle/go-judge/envexec"
"github.com/criyle/go-judge/filestore"
"github.com/gin-gonic/gin"
)
@ -15,6 +16,21 @@ type fileHandle struct {
fs filestore.FileStore
}
+// NewFileHandle creates a new file handle
+func NewFileHandle(fs filestore.FileStore) Register {
+return &fileHandle{
+fs: fs,
+}
+}
+func (f *fileHandle) Register(r *gin.Engine) {
+// File handle
+r.GET("/file", f.fileGet)
+r.POST("/file", f.filePost)
+r.GET("/file/:fid", f.fileIDGet)
+r.DELETE("/file/:fid", f.fileIDDelete)
+}
func (f *fileHandle) fileGet(c *gin.Context) {
ids := f.fs.List()
c.JSON(http.StatusOK, ids)
@ -32,13 +48,18 @@ func (f *fileHandle) filePost(c *gin.Context) {
c.AbortWithError(http.StatusInternalServerError, err)
return
}
-b, err := ioutil.ReadAll(fi)
+sf, err := f.fs.New()
if err != nil {
c.AbortWithError(http.StatusInternalServerError, err)
return
}
+defer sf.Close()
-id, err := f.fs.Add(fh.Filename, b)
+if _, err := sf.ReadFrom(fi); err != nil {
+c.AbortWithError(http.StatusInternalServerError, err)
+return
+}
+id, err := f.fs.Add(fh.Filename, sf.Name())
if err != nil {
c.AbortWithError(http.StatusInternalServerError, err)
return
@ -56,19 +77,33 @@ func (f *fileHandle) fileIDGet(c *gin.Context) {
return
}
-file := f.fs.Get(uri.FileID)
+name, file := f.fs.Get(uri.FileID)
if file == nil {
c.AbortWithStatus(http.StatusNotFound)
return
}
-content, err := file.Content()
+typ := mime.TypeByExtension(filepath.Ext(name))
+c.Header("Content-Type", typ)
+fi, ok := file.(*envexec.FileInput) // fast path
+if ok {
+c.FileAttachment(fi.Path, name)
+return
+}
+r, err := envexec.FileToReader(file)
if err != nil {
c.AbortWithError(http.StatusInternalServerError, err)
return
}
-typ := mime.TypeByExtension(path.Ext(file.Name()))
-c.Header("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", file.Name()))
+defer r.Close()
+content, err := io.ReadAll(r)
+if err != nil {
+c.AbortWithError(http.StatusInternalServerError, err)
+return
+}
+c.Header("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", name))
c.Data(http.StatusOK, typ, content)
}


@ -0,0 +1,238 @@
package restexecutor
import (
"bytes"
"mime/multipart"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"strings"
"testing"
"github.com/criyle/go-judge/filestore"
"github.com/gin-gonic/gin"
)
func TestFilePost(t *testing.T) {
// Create a temporary directory for the file store
tempDir := t.TempDir()
// Initialize the file store
router := gin.Default()
f := &fileHandle{fs: filestore.NewFileLocalStore(tempDir)}
router.POST("/file", f.filePost)
// Create a buffer to simulate multipart form
body := &bytes.Buffer{}
writer := multipart.NewWriter(body)
// Create a form file
fileWriter, err := writer.CreateFormFile("file", "test.py")
if err != nil {
t.Fatalf("Failed to create form file: %v", err)
}
// Write some content to the file
contentToWrite := "print(58 - 7 * 3)"
_, err = fileWriter.Write([]byte(contentToWrite))
if err != nil {
t.Fatalf("Failed to write to form file: %v", err)
}
err = writer.Close()
if err != nil {
t.Fatalf("Failed to close writer: %v", err)
}
// Create HTTP request
req := httptest.NewRequest("POST", "/file", body)
req.Header.Set("Content-Type", writer.FormDataContentType())
// Record the response
w := httptest.NewRecorder()
router.ServeHTTP(w, req)
// Check the response status code
if w.Code != http.StatusOK {
t.Fatalf("Expected status %d, got %d", http.StatusOK, w.Code)
}
fileID := w.Body.String()
// Check if the length of fileID is correct
if len(fileID) <= 3 {
t.Fatalf("Expected file ID length greater than 3, got %d", len(fileID))
}
// Remove quotes from the response
fileID = fileID[1 : len(fileID)-1]
// Check if the file is stored correctly
filePath := filepath.Join(tempDir, fileID)
_, err = os.Stat(filePath)
if os.IsNotExist(err) {
t.Fatalf("File should exist in the storage: %v", err)
}
// Read the file content
file, err := os.Open(filePath)
if err != nil {
t.Fatalf("Failed to open file: %v", err)
}
defer file.Close()
content := make([]byte, len(contentToWrite))
_, err = file.Read(content)
if err != nil {
t.Fatalf("Failed to read file: %v", err)
}
// Check if the content matches
if string(content) != contentToWrite {
t.Fatalf("File content does not match: expected %s, got %s", contentToWrite, string(content))
}
}
// CreateFileWithContent creates a file at the given path with the specified content
func CreateFileWithContent(filePath, content string) error {
// Create a file with the specified content
return os.WriteFile(filePath, []byte(content), 0644)
}
// TestFileGet tests the file retrieval functionality
func TestFileGet(t *testing.T) {
// Create a temporary directory for the file store
tempDir := t.TempDir()
// Initialize the file store
router := gin.Default()
f := &fileHandle{fs: filestore.NewFileLocalStore(tempDir)}
router.GET("/file", f.fileGet)
type FileToCreate struct {
Name string
Content string
}
filesToCreate := []FileToCreate{
{"test1.py", "print(58 - 7 * 3)"},
{"test2.py", "print(58 + 7 * 3)"},
{"test3.py", "print(58 / 7 * 3)"},
}
// Create files in the temporary directory
for _, file := range filesToCreate {
filePath := filepath.Join(tempDir, file.Name)
err := CreateFileWithContent(filePath, file.Content)
if err != nil {
t.Fatalf("Failed to create file: %v", err)
}
}
// Create HTTP request
req := httptest.NewRequest("GET", "/file", nil)
// Record the response
w := httptest.NewRecorder()
router.ServeHTTP(w, req)
// Check the response status code
if w.Code != http.StatusOK {
t.Fatalf("Expected status %d, got %d", http.StatusOK, w.Code)
}
fileIDs := w.Body.String()
t.Logf("File IDs: %s", fileIDs)
for _, file := range filesToCreate {
testFileName := file.Name
// Check if the file ID is present in the response
if !strings.Contains(fileIDs, testFileName) {
t.Fatalf("Expected file ID %s to be present in the response", testFileName)
}
}
}
// TestFileIDGet tests the file retrieval by ID functionality
func TestFileIDGet(t *testing.T) {
// Create a temporary directory for the file store
tempDir := t.TempDir()
// Initialize the file store
router := gin.Default()
f := &fileHandle{fs: filestore.NewFileLocalStore(tempDir)}
router.GET("/file/:fid", f.fileIDGet)
// Create a test file
testFileName := "test.py"
testFilePath := filepath.Join(tempDir, testFileName)
err := CreateFileWithContent(testFilePath, "print(58 - 7 * 3)")
if err != nil {
t.Fatalf("Failed to create test file: %v", err)
}
// Add the file to the file store
fileID, err := f.fs.Add(testFileName, testFilePath)
if err != nil {
t.Fatalf("Failed to add file to storage: %v", err)
}
// Create HTTP request
req := httptest.NewRequest("GET", "/file/"+fileID, nil)
// Record the response
w := httptest.NewRecorder()
router.ServeHTTP(w, req)
// Check the response status code
if w.Code != http.StatusOK {
t.Fatalf("Expected status %d, got %d", http.StatusOK, w.Code)
}
bodyBytes, err := os.ReadFile(filepath.Join(tempDir, fileID))
if err != nil {
t.Fatalf("Failed to read response body: %v", err)
}
bodyString := string(bodyBytes)
expectedContent := "print(58 - 7 * 3)"
if bodyString != expectedContent {
t.Fatalf("Expected response body %s, got %s", expectedContent, bodyString)
}
}
// TestFileIDDelete tests the file deletion functionality
func TestFileIDDelete(t *testing.T) {
// Create a temporary directory for the file store
tempDir := t.TempDir()
// Initialize the file store
router := gin.Default()
f := &fileHandle{fs: filestore.NewFileLocalStore(tempDir)}
router.DELETE("/file/:fid", f.fileIDDelete)
// Create a test file
testFileName := "test.py"
testFilePath := filepath.Join(tempDir, testFileName)
err := CreateFileWithContent(testFilePath, "print(58 - 7 * 3)")
if err != nil {
t.Fatalf("Failed to create test file: %v", err)
}
// Add the file to the file store
fileID, err := f.fs.Add(testFileName, testFilePath)
if err != nil {
t.Fatalf("Failed to add file to storage: %v", err)
}
// Create HTTP request
req := httptest.NewRequest("DELETE", "/file/"+fileID, nil)
// Record the response
w := httptest.NewRecorder()
router.ServeHTTP(w, req)
// Check the response status code
if w.Code != http.StatusOK {
t.Fatalf("Expected status %d, got %d", http.StatusOK, w.Code)
}
// Check if the file is deleted from the storage
if _, err := os.Stat(filepath.Join(tempDir, fileID)); !os.IsNotExist(err) {
t.Fatalf("Expected file to be deleted, but it still exists")
}
}


@ -0,0 +1,8 @@
package restexecutor
import "github.com/gin-gonic/gin"
// Register is implemented by handlers that register their routes on the gin engine
type Register interface {
Register(*gin.Engine)
}
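A minimal sketch of how the Register implementations in this package are meant to be wired onto one gin engine. The NewCmdHandle call follows its use in TestHandleRun; treating its second argument as the src prefix list is an assumption, and registerAll itself is not part of this change set.

package restexecutor

import (
	"github.com/criyle/go-judge/filestore"
	"github.com/criyle/go-judge/worker"
	"github.com/gin-gonic/gin"
	"go.uber.org/zap"
)

// registerAll attaches every handler that satisfies Register to a single router.
func registerAll(work worker.Worker, fs filestore.FileStore, logger *zap.Logger) *gin.Engine {
	r := gin.Default()
	for _, h := range []Register{
		NewCmdHandle(work, nil, logger), // nil src prefix, as in TestHandleRun
		NewFileHandle(fs),
	} {
		h.Register(r)
	}
	return r
}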


@ -0,0 +1,86 @@
package stream
import (
"errors"
"fmt"
"github.com/criyle/go-judge/envexec"
"github.com/criyle/go-judge/filestore"
"github.com/criyle/go-judge/worker"
)
var (
_ worker.CmdFile = &fileStreamIn{}
_ worker.CmdFile = &fileStreamOut{}
errNoTTY = errors.New("stream is not a tty")
)
type fileStreamIn struct {
stream envexec.FileStreamIn
index int
fd int
hasTTY bool
}
func newFileStreamIn(index, fd int, hasTTY bool) *fileStreamIn {
return &fileStreamIn{
stream: envexec.NewFileStreamIn(),
index: index,
fd: fd,
hasTTY: hasTTY,
}
}
func (f *fileStreamIn) SetSize(s *envexec.TerminalSize) error {
if !f.hasTTY {
return errNoTTY
}
return f.stream.SetSize(s)
}
func (f *fileStreamIn) Write(b []byte) (int, error) {
return f.stream.Write(b)
}
func (f *fileStreamIn) EnvFile(fs filestore.FileStore) (envexec.File, error) {
return f.stream, nil
}
func (f *fileStreamIn) String() string {
return fmt.Sprintf("fileStreamIn:(index:%d,fd:%d)", f.index, f.fd)
}
func (f *fileStreamIn) Close() error {
return f.stream.Close()
}
type fileStreamOut struct {
stream envexec.FileStreamOut
index int
fd int
}
func newFileStreamOut(index, fd int) *fileStreamOut {
return &fileStreamOut{
stream: envexec.NewFileStreamOut(),
index: index,
fd: fd,
}
}
func (f *fileStreamOut) Read(b []byte) (int, error) {
return f.stream.Read(b)
}
func (f *fileStreamOut) EnvFile(fs filestore.FileStore) (envexec.File, error) {
return f.stream, nil
}
func (f *fileStreamOut) String() string {
return fmt.Sprintf("fileStreamOut:(index:%d,fd:%d)", f.index, f.fd)
}
func (f *fileStreamOut) Close() error {
return f.stream.Close()
}


@ -0,0 +1,292 @@
package stream
import (
"context"
"errors"
"fmt"
"io"
"github.com/criyle/go-judge/cmd/go-judge/model"
"github.com/criyle/go-judge/envexec"
"github.com/criyle/go-judge/worker"
"go.uber.org/zap"
"golang.org/x/sync/errgroup"
)
const (
newBuffLen = 32 << 10
minBuffLen = 4 << 10
)
// Stream defines the transport layer for stream execution, which
// streams input and output interactively
type Stream interface {
Send(Response) error
Recv() (*Request, error)
}
// Request defines operations received from the remote
type Request struct {
Request *model.Request
Resize *ResizeRequest
Input *InputRequest
Cancel *struct{}
}
// Response defines response to the remote
type Response struct {
Response *model.Response
Output *OutputResponse
}
// ResizeRequest defines resize operation to the virtual terminal
type ResizeRequest struct {
Index int `json:"index,omitempty"`
Fd int `json:"fd,omitempty"`
Rows int `json:"rows,omitempty"`
Cols int `json:"cols,omitempty"`
X int `json:"x,omitempty"`
Y int `json:"y,omitempty"`
}
// InputRequest defines input operation from the remote
type InputRequest struct {
Index int
Fd int
Content []byte
}
// OutputResponse defines the output result sent to the remote
type OutputResponse struct {
Index int
Fd int
Content []byte
}
var (
errFirstMustBeExec = errors.New("the first stream request must be exec request")
)
// Start initiates an interactive execution on the worker and transmits requests and responses over the Stream transport layer
func Start(baseCtx context.Context, s Stream, w worker.Worker, srcPrefix []string, logger *zap.Logger) error {
req, err := s.Recv()
if err != nil {
return err
}
if req.Request == nil {
return errFirstMustBeExec
}
rq, streamIn, streamOut, err := convertStreamRequest(req.Request, srcPrefix)
if err != nil {
return fmt.Errorf("convert exec request: %w", err)
}
closeFunc := func() {
for _, f := range streamIn {
f.Close()
}
streamIn = nil
for _, f := range streamOut {
f.Close()
}
streamOut = nil
}
defer closeFunc()
if ce := logger.Check(zap.DebugLevel, "request"); ce != nil {
ce.Write(zap.String("body", fmt.Sprintf("%+v", rq)))
}
var wg errgroup.Group
execCtx, execCancel := context.WithCancel(baseCtx)
defer execCancel()
ctx, cancel := context.WithCancel(baseCtx)
defer cancel()
// stream in
wg.Go(func() error {
if err := streamInput(ctx, s, streamIn, execCancel); err != nil {
cancel()
return err
}
return nil
})
// stream out
outCh := make(chan *OutputResponse, len(streamOut))
if len(streamOut) > 0 {
for _, so := range streamOut {
so := so
wg.Go(func() error {
return streamOutput(ctx, outCh, so)
})
}
}
rtCh := w.Execute(execCtx, rq)
err = sendLoop(ctx, s, outCh, rtCh, logger)
cancel()
closeFunc()
wg.Wait()
return err
}
func sendLoop(ctx context.Context, s Stream, outCh chan *OutputResponse, rtCh <-chan worker.Response, logger *zap.Logger) error {
for {
select {
case <-ctx.Done(): // error occur
return ctx.Err()
case o := <-outCh:
err := s.Send(Response{Output: o})
if err != nil {
return fmt.Errorf("send output: %w", err)
}
case rt := <-rtCh:
if ce := logger.Check(zap.DebugLevel, "response"); ce != nil {
ce.Write(zap.String("body", fmt.Sprintf("%+v", rt)))
}
ret, err := model.ConvertResponse(rt, false)
if err != nil {
return fmt.Errorf("convert response: %w", err)
}
return s.Send(Response{Response: &model.Response{Results: ret.Results}})
}
}
}
func convertStreamRequest(m *model.Request, srcPrefix []string) (req *worker.Request, streamIn []*fileStreamIn, streamOut []*fileStreamOut, err error) {
type cmdStream struct {
index int
fd int
f worker.CmdFile
}
defer func() {
if err != nil {
for _, fi := range streamIn {
fi.Close()
}
streamIn = nil
for _, fi := range streamOut {
fi.Close()
}
streamOut = nil
}
}()
var streams []cmdStream
for i, c := range m.Cmd {
for j, f := range c.Files {
switch {
case f == nil:
continue
case f.StreamIn:
si := newFileStreamIn(i, j, c.TTY)
streamIn = append(streamIn, si)
streams = append(streams, cmdStream{index: i, fd: j, f: si})
c.Files[j] = nil
case f.StreamOut:
so := newFileStreamOut(i, j)
streamOut = append(streamOut, so)
streams = append(streams, cmdStream{index: i, fd: j, f: so})
c.Files[j] = nil
}
}
}
req, err = model.ConvertRequest(m, srcPrefix)
if err != nil {
return req, streamIn, streamOut, err
}
for _, f := range streams {
req.Cmd[f.index].Files[f.fd] = f.f
}
return
}
func streamInput(ctx context.Context, s Stream, si []*fileStreamIn, execCancel func()) error {
inf := make(map[int]*fileStreamIn)
for _, f := range si {
inf[f.index<<8|f.fd] = f
}
for {
select {
case <-ctx.Done():
return nil
default:
}
in, err := s.Recv()
if err == io.EOF {
return nil
}
if err != nil {
return err
}
switch {
case in.Input != nil:
f, ok := inf[in.Input.Index<<8|in.Input.Fd]
if !ok {
return fmt.Errorf("input does not exist: %d/%d", in.Input.Index, in.Input.Fd)
}
_, err := f.Write(in.Input.Content)
if err == io.EOF { // file closed with io.EOF
return nil
}
if err != nil {
return fmt.Errorf("write to input %d/%d: %w", in.Input.Index, in.Input.Fd, err)
}
case in.Resize != nil:
f, ok := inf[in.Resize.Index<<8|in.Resize.Fd]
if !ok {
return fmt.Errorf("input does not exist: %d/%d", in.Resize.Index, in.Resize.Fd)
}
if err = f.SetSize(&envexec.TerminalSize{
Cols: uint16(in.Resize.Cols),
Rows: uint16(in.Resize.Rows),
X: uint16(in.Resize.X),
Y: uint16(in.Resize.Y),
}); err != nil {
return fmt.Errorf("resize %d/%d: %w", in.Resize.Index, in.Resize.Fd, err)
}
case in.Cancel != nil:
execCancel()
return nil
default:
return fmt.Errorf("invalid request")
}
}
}
func streamOutput(ctx context.Context, outCh chan *OutputResponse, so *fileStreamOut) error {
var buf []byte
for {
select {
case <-ctx.Done():
return nil
default:
}
if len(buf) < minBuffLen {
buf = make([]byte, newBuffLen)
}
n, err := so.Read(buf)
if err != nil { // file closed with io.EOF
return nil
}
select {
case <-ctx.Done():
return nil
case outCh <- &OutputResponse{
Index: so.index,
Fd: so.fd,
Content: buf[:n],
}:
}
buf = buf[n:]
}
}
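Stream is transport-agnostic, so anything that can deliver Request values and accept Response values can drive Start. As a hedged illustration (not part of this change set), a channel-backed Stream like the sketch below is enough for in-memory tests; the websocket wrapper later in this diff plays the same role over a real connection.

package stream_test

import (
	"io"

	"github.com/criyle/go-judge/cmd/go-judge/stream"
)

var _ stream.Stream = (*chanStream)(nil)

// chanStream is a sketch of an in-memory Stream: requests are read from in,
// responses are collected on out.
type chanStream struct {
	in  chan *stream.Request
	out chan stream.Response
}

func (s *chanStream) Send(r stream.Response) error {
	s.out <- r
	return nil
}

func (s *chanStream) Recv() (*stream.Request, error) {
	r, ok := <-s.in
	if !ok {
		// a closed channel behaves like a remote that hung up
		return nil, io.EOF
	}
	return r, nil
}

Feeding an exec Request into in and then calling stream.Start(ctx, s, w, nil, logger) exercises the whole input/output plumbing without any network in between.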


@ -0,0 +1,3 @@
package version
//go:generate bash -c "git describe --tags > version.txt"


@ -0,0 +1,33 @@
package version
import (
"embed"
"io"
"runtime/debug"
"strings"
)
//go:embed version.*
var versions embed.FS
// Version defines the version of go-judge
var Version string = "unable to get version"
func init() {
f, err := versions.Open("version.txt")
if err != nil {
// go generate was not run, assuming installed by go install
// get version information from debug
inf, ok := debug.ReadBuildInfo()
if !ok {
return
}
Version = inf.Main.Version
return
}
s, err := io.ReadAll(f)
if err != nil {
return
}
Version = strings.TrimSpace(string(s))
}


@ -0,0 +1,120 @@
package wsexecutor
import (
"context"
"encoding/json"
"fmt"
"io"
"time"
"github.com/criyle/go-judge/cmd/go-judge/model"
"github.com/criyle/go-judge/cmd/go-judge/stream"
"github.com/gorilla/websocket"
)
var _ stream.Stream = &streamWrapper{}
type streamWrapper struct {
ctx context.Context
conn *websocket.Conn
sendCh chan stream.Response
}
func (w *streamWrapper) sendLoop() {
conn := w.conn
defer conn.Close()
ticker := time.NewTicker(pingPeriod)
defer ticker.Stop()
for {
select {
case <-w.ctx.Done():
return
case r := <-w.sendCh:
conn.SetWriteDeadline(time.Now().Add(writeWait))
switch {
case r.Response != nil:
w, err := conn.NextWriter(websocket.BinaryMessage)
if err != nil {
return
}
if _, err := w.Write([]byte{1}); err != nil {
return
}
if err := json.NewEncoder(w).Encode(r.Response); err != nil {
return
}
if err := w.Close(); err != nil {
return
}
conn.Close()
return
case r.Output != nil:
w, err := conn.NextWriter(websocket.BinaryMessage)
if err != nil {
return
}
if _, err := w.Write([]byte{2, byte(r.Output.Index<<4 | r.Output.Fd)}); err != nil {
return
}
if _, err := w.Write(r.Output.Content); err != nil {
return
}
if err := w.Close(); err != nil {
return
}
}
case <-ticker.C:
conn.SetWriteDeadline(time.Now().Add(writeWait))
if err := conn.WriteMessage(websocket.PingMessage, nil); err != nil {
return
}
}
}
}
func (w *streamWrapper) Send(resp stream.Response) error {
select {
case <-w.ctx.Done():
return w.ctx.Err()
case w.sendCh <- resp:
return nil
}
}
func (w *streamWrapper) Recv() (*stream.Request, error) {
conn := w.conn
_, buf, err := conn.ReadMessage()
if err != nil {
return nil, err
}
if len(buf) == 0 {
return nil, io.ErrUnexpectedEOF
}
var req stream.Request
switch buf[0] {
case 1:
req.Request = new(model.Request)
if err := json.Unmarshal(buf[1:], req.Request); err != nil {
return nil, err
}
case 2:
req.Resize = new(stream.ResizeRequest)
if err := json.Unmarshal(buf[1:], req.Resize); err != nil {
return nil, err
}
case 3:
if len(buf) < 2 {
return nil, io.ErrUnexpectedEOF
}
req.Input = new(stream.InputRequest)
req.Input.Index = int(buf[1]>>4) & 0xf
req.Input.Fd = int(buf[1]) & 0xf
req.Input.Content = buf[2:]
case 4:
req.Cancel = new(struct{})
default:
return nil, fmt.Errorf("invalid type code: %d", buf[0])
}
return &req, nil
}
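Recv fixes the binary framing used on the /stream websocket: the first byte selects the message type (1 exec request, 2 resize, 3 input, 4 cancel), and for input frames the second byte packs the command index in the high nibble and the fd in the low nibble; sendLoop mirrors this for server messages (1 final response, 2 output chunk). A client-side sketch of the same framing, where the package and helper names are ours and not part of this change set:

package wsclient

import (
	"encoding/json"

	"github.com/criyle/go-judge/cmd/go-judge/model"
)

// encodeExec frames the initial exec request: type code 1 followed by JSON.
func encodeExec(req *model.Request) ([]byte, error) {
	body, err := json.Marshal(req)
	if err != nil {
		return nil, err
	}
	return append([]byte{1}, body...), nil
}

// encodeInput frames stdin data for a command: type code 3, then one byte
// packing index<<4|fd (both must fit in 4 bits, matching Recv), then the data.
func encodeInput(index, fd int, content []byte) []byte {
	return append([]byte{3, byte(index<<4 | fd)}, content...)
}

// encodeCancel frames a cancel request: type code 4, no body.
func encodeCancel() []byte {
	return []byte{4}
}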


@ -0,0 +1,241 @@
package wsexecutor
import (
"context"
"fmt"
"net/http"
"sync"
"time"
"github.com/criyle/go-judge/cmd/go-judge/model"
"github.com/criyle/go-judge/cmd/go-judge/stream"
"github.com/criyle/go-judge/worker"
"github.com/gin-gonic/gin"
"github.com/gorilla/websocket"
"go.uber.org/zap"
)
var _ Register = &wsHandle{}
// Register registers the websocket handlers (/ws and /stream)
type Register interface {
Register(*gin.Engine)
}
// New creates new websocket handle
func New(worker worker.Worker, srcPrefix []string, logger *zap.Logger) Register {
return &wsHandle{
worker: worker,
srcPrefix: srcPrefix,
logger: logger,
}
}
var upgrader = websocket.Upgrader{
CheckOrigin: func(r *http.Request) bool {
return true
},
}
const (
writeWait = 10 * time.Second
pongWait = 60 * time.Second
pingPeriod = 50 * time.Second
)
type wsHandle struct {
worker worker.Worker
srcPrefix []string
logger *zap.Logger
}
type wsRequest struct {
model.Request
CancelRequestID string `json:"cancelRequestId"`
}
func (h *wsHandle) Register(r *gin.Engine) {
r.GET("/ws", h.handleWS)
r.GET("/stream", h.handleStream)
}
func (h *wsHandle) handleWS(c *gin.Context) {
conn, err := upgrader.Upgrade(c.Writer, c.Request, nil)
if err != nil {
c.Error(err)
c.AbortWithStatusJSON(http.StatusBadRequest, err.Error())
return
}
resultCh := make(chan model.Response, 128)
cm := newContextMap()
handleRequest := func(baseCtx context.Context, req *wsRequest) error {
if req.CancelRequestID != "" {
h.logger.Debug("ws cancel", zap.String("requestId", req.CancelRequestID))
cm.Remove(req.CancelRequestID)
return nil
}
r, err := model.ConvertRequest(&req.Request, h.srcPrefix)
if err != nil {
return fmt.Errorf("ws convert error: %w", err)
}
ctx, cancel := context.WithCancel(baseCtx)
if err := cm.Add(r.RequestID, cancel); err != nil {
select {
case <-baseCtx.Done():
case resultCh <- model.Response{
RequestID: req.RequestID,
ErrorMsg: err.Error(),
}:
}
cancel()
h.logger.Debug("ws request error", zap.Error(err))
return nil
}
go func() {
defer cm.Remove(r.RequestID)
if ce := h.logger.Check(zap.DebugLevel, "ws request"); ce != nil {
ce.Write(zap.String("body", fmt.Sprintf("%+v", r)))
}
retCh, started := h.worker.Submit(ctx, r)
var ret worker.Response
select {
case <-baseCtx.Done(): // if connection lost
return
case <-ctx.Done(): // if context cancelled by cancelling request
select {
case <-started: // if started, wait for result
ret = <-retCh
default: // not started
ret = worker.Response{
RequestID: r.RequestID,
Error: fmt.Errorf("request cancelled before execute"),
}
}
case ret = <-retCh:
}
if ce := h.logger.Check(zap.DebugLevel, "response"); ce != nil {
ce.Write(zap.String("body", fmt.Sprintf("%+v", ret)))
}
resp, err := model.ConvertResponse(ret, false)
if err != nil {
resp = model.Response{
RequestID: r.RequestID,
ErrorMsg: resp.ErrorMsg,
}
}
select {
case <-baseCtx.Done():
case resultCh <- resp:
}
}()
return nil
}
// read request
go func() {
defer conn.Close()
conn.SetReadDeadline(time.Now().Add(pongWait))
conn.SetPongHandler(func(string) error {
conn.SetReadDeadline(time.Now().Add(pongWait))
return nil
})
baseCtx, baseCancel := context.WithCancel(context.TODO())
defer baseCancel()
for {
req := new(wsRequest)
if err := conn.ReadJSON(req); err != nil {
h.logger.Info("ws read error", zap.Error(err))
return
}
if err := handleRequest(baseCtx, req); err != nil {
h.logger.Info("ws handle error", zap.Error(err))
return
}
}
}()
// write result
go func() {
defer conn.Close()
ticker := time.NewTicker(pingPeriod)
defer ticker.Stop()
for {
select {
case r := <-resultCh:
conn.SetWriteDeadline(time.Now().Add(writeWait))
if err := conn.WriteJSON(r); err != nil {
h.logger.Info("ws write error", zap.Error(err))
return
}
case <-ticker.C:
conn.SetWriteDeadline(time.Now().Add(writeWait))
if err := conn.WriteMessage(websocket.PingMessage, nil); err != nil {
return
}
}
}
}()
}
func (h *wsHandle) handleStream(c *gin.Context) {
conn, err := upgrader.Upgrade(c.Writer, c.Request, nil)
if err != nil {
c.Error(err)
c.AbortWithStatusJSON(http.StatusBadRequest, err.Error())
return
}
conn.SetReadDeadline(time.Now().Add(pongWait))
conn.SetPongHandler(func(string) error {
conn.SetReadDeadline(time.Now().Add(pongWait))
return nil
})
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
w := &streamWrapper{ctx: ctx, conn: conn, sendCh: make(chan stream.Response)}
go w.sendLoop()
if err := stream.Start(ctx, w, h.worker, h.srcPrefix, h.logger); err != nil {
h.logger.Debug("stream start", zap.Error(err))
c.Error(err)
}
}
type contextMap struct {
m map[string]context.CancelFunc
mu sync.Mutex
}
func newContextMap() *contextMap {
return &contextMap{m: make(map[string]context.CancelFunc)}
}
func (c *contextMap) Add(reqID string, cancel context.CancelFunc) error {
if reqID == "" {
return fmt.Errorf("empty request id")
}
c.mu.Lock()
defer c.mu.Unlock()
if _, exist := c.m[reqID]; exist {
return fmt.Errorf("duplicated request id: %q", reqID)
}
c.m[reqID] = cancel
return nil
}
func (c *contextMap) Remove(reqID string) {
c.mu.Lock()
defer c.mu.Unlock()
if cancel, exist := c.m[reqID]; exist {
delete(c.m, reqID)
cancel()
}
}
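On /ws the framing is plain JSON instead: each client message is a model.Request with an optional cancelRequestId field (see wsRequest above), and each server message is a model.Response. A rough client sketch; the dial address and the trimmed command are assumptions, not part of this change set:

package main

import (
	"log"

	"github.com/criyle/go-judge/cmd/go-judge/model"
	"github.com/gorilla/websocket"
)

func main() {
	// Assumed address of a running go-judge instance.
	conn, _, err := websocket.DefaultDialer.Dial("ws://localhost:5050/ws", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Submit a trimmed request; the requestId is echoed back in the response.
	if err := conn.WriteJSON(map[string]any{
		"requestId": "qwq",
		"cmd":       []map[string]any{{"args": []string{"/bin/echo", "hello"}}},
	}); err != nil {
		log.Fatal(err)
	}

	// Sending only cancelRequestId would cancel the pending request instead:
	// conn.WriteJSON(map[string]any{"cancelRequestId": "qwq"})

	var resp model.Response
	if err := conn.ReadJSON(&resp); err != nil {
		log.Fatal(err)
	}
	log.Printf("%+v", resp)
}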


@ -1,2 +0,0 @@
root:x:0:0::/w:/bin/bash
go-judge:x:1536:1536::/w:/bin/bash

11
env/config.go vendored

@ -2,24 +2,17 @@ package env
import "time" import "time"
// Logger defines logger to print logs
type Logger interface {
Debug(args ...interface{})
Info(args ...interface{})
Warn(args ...interface{})
Error(args ...interface{})
}
// Config defines parameters to create environment builder // Config defines parameters to create environment builder
type Config struct { type Config struct {
ContainerInitPath string ContainerInitPath string
TmpFsParam string TmpFsParam string
NetShare bool NetShare bool
MountConf string MountConf string
SeccompConf string
CgroupPrefix string CgroupPrefix string
Cpuset string Cpuset string
ContainerCredStart int ContainerCredStart int
EnableCPURate bool EnableCPURate bool
CPUCfsPeriod time.Duration CPUCfsPeriod time.Duration
Logger NoFallback bool
} }

8
env/doc.go vendored

@ -1,2 +1,8 @@
-// Package env provides a unified method to create environment for envexec
+// Package env provides a unified method to create environment for envexec.
+//
+// For linux, the env creates container & cgroup sandbox.
+//
+// For windows, the env creates low mandatory level sandbox.
+//
+// For macOS, the env creates sandbox_init sandbox.
package env

173
env/env_cgroup_linux.go vendored Normal file

@ -0,0 +1,173 @@
package env
import (
"context"
"fmt"
"os"
"github.com/coreos/go-systemd/v22/dbus"
"github.com/criyle/go-judge/env/linuxcontainer"
"github.com/criyle/go-sandbox/pkg/cgroup"
ddbus "github.com/godbus/dbus/v5"
"go.uber.org/zap"
)
func setupCgroup(c Config, logger *zap.Logger) (cgroup.Cgroup, *cgroup.Controllers, error) {
prefix := c.CgroupPrefix
t := cgroup.DetectedCgroupType
ct, err := cgroup.GetAvailableController()
if err != nil {
logger.Error("failed to get available controllers", zap.Error(err))
return nil, nil, err
}
if t == cgroup.TypeV2 {
prefix, ct, err = setupCgroupV2(prefix, logger)
if err != nil {
return nil, nil, err
}
}
return createAndNestCgroup(prefix, ct, c.NoFallback, logger)
}
func setupCgroupV2(prefix string, logger *zap.Logger) (string, *cgroup.Controllers, error) {
logger.Info("running with cgroup v2, connecting systemd dbus to create cgroup")
conn, err := getSystemdConnection()
if err != nil {
logger.Info("connecting to systemd dbus failed, assuming running in container, enable cgroup v2 nesting support and take control of the whole cgroupfs", zap.Error(err))
return "", getControllersWithPrefix("", logger), nil
}
defer conn.Close()
scopeName := prefix + ".scope"
logger.Info("connected to systemd bus, attempting to create transient unit", zap.String("scopeName", scopeName))
if err := startTransientUnit(conn, scopeName, logger); err != nil {
return "", nil, err
}
scopeName, err = cgroup.GetCurrentCgroupPrefix()
if err != nil {
logger.Error("failed to get current cgroup prefix", zap.Error(err))
return "", nil, err
}
logger.Info("current cgroup", zap.String("scope_name", scopeName))
ct, err := cgroup.GetAvailableControllerWithPrefix(scopeName)
if err != nil {
logger.Error("failed to get available controller with prefix", zap.Error(err))
return "", nil, err
}
return scopeName, ct, nil
}
func getSystemdConnection() (*dbus.Conn, error) {
if os.Getuid() == 0 {
return dbus.NewSystemConnectionContext(context.TODO())
}
return dbus.NewUserConnectionContext(context.TODO())
}
func startTransientUnit(conn *dbus.Conn, scopeName string, logger *zap.Logger) error {
properties := []dbus.Property{
dbus.PropDescription("go judge - a high performance sandbox service base on container technologies"),
dbus.PropWants(scopeName),
dbus.PropPids(uint32(os.Getpid())),
newSystemdProperty("Delegate", true),
newSystemdProperty("KillMode", "process"),
}
ch := make(chan string, 1)
if _, err := conn.StartTransientUnitContext(context.TODO(), scopeName, "replace", properties, ch); err != nil {
logger.Error("failed to start transient unit", zap.Error(err))
return fmt.Errorf("failed to start transient unit: %w", err)
}
s := <-ch
if s != "done" {
logger.Error("starting transient unit returns error", zap.String("status", s))
return fmt.Errorf("starting transient unit returns error: %s", s)
}
return nil
}
func getControllersWithPrefix(prefix string, logger *zap.Logger) *cgroup.Controllers {
ct, err := cgroup.GetAvailableControllerWithPrefix(prefix)
if err != nil {
logger.Error("failed to get available controller with prefix", zap.Error(err))
return nil
}
return ct
}
func createAndNestCgroup(prefix string, ct *cgroup.Controllers, noFallback bool, logger *zap.Logger) (cgroup.Cgroup, *cgroup.Controllers, error) {
cgb, err := cgroup.New(prefix, ct)
if err != nil {
if os.Getuid() == 0 {
logger.Error("failed to create cgroup", zap.String("prefix", prefix), zap.Error(err))
return nil, nil, err
}
logger.Warn("not running in root and have no permission on cgroup, falling back to rlimit / rusage mode", zap.Error(err))
if noFallback {
return nil, nil, fmt.Errorf("failed to create cgroup with no fallback: %w", err)
}
return nil, nil, nil
}
logger.Info("creating nesting api cgroup", zap.Any("cgroup", cgb))
if _, err = cgb.Nest("api"); err != nil {
if os.Getuid() != 0 {
logger.Warn("creating api cgroup with error, falling back to rlimit / rusage mode", zap.Error(err))
cgb.Destroy()
if noFallback {
return nil, nil, fmt.Errorf("failed to create nesting api cgroup with no fallback: %w", err)
}
return nil, nil, nil
}
}
logger.Info("creating containers cgroup")
cg, err := cgb.New("containers")
if err != nil {
logger.Warn("creating containers cgroup with error, falling back to rlimit / rusage mode", zap.Error(err))
if noFallback {
return nil, nil, fmt.Errorf("failed to create containers cgroup with no fallback: %w", err)
}
return nil, nil, nil
}
if ct != nil && !ct.Memory {
logger.Warn("memory cgroup is not enabled, falling back to rlimit / rusage mode")
if noFallback {
return nil, nil, fmt.Errorf("memory cgroup is not enabled with no fallback: %w", err)
}
}
if ct != nil && !ct.Pids {
logger.Warn("pid cgroup is not enabled, proc limit does not have effect")
}
return cg, ct, nil
}
func prepareCgroupPool(cgb cgroup.Cgroup, c Config) linuxcontainer.CgroupPool {
if cgb != nil {
return linuxcontainer.NewFakeCgroupPool(cgb, c.CPUCfsPeriod)
}
return nil
}
func getCgroupInfo(cgb cgroup.Cgroup, ct *cgroup.Controllers) (int, []string) {
cgroupType := int(cgroup.DetectedCgroupType)
if cgb == nil {
cgroupType = 0
}
cgroupControllers := []string{}
if ct != nil {
cgroupControllers = ct.Names()
}
return cgroupType, cgroupControllers
}
func newSystemdProperty(name string, units any) dbus.Property {
return dbus.Property{
Name: name,
Value: ddbus.MakeVariant(units),
}
}

11
env/env_darwin.go vendored

@ -1,8 +1,9 @@
package env
import (
-"github.com/criyle/go-judge/pkg/macsandbox"
-"github.com/criyle/go-judge/pkg/pool"
+"github.com/criyle/go-judge/env/macsandbox"
+"github.com/criyle/go-judge/env/pool"
+"go.uber.org/zap"
)
var defaultRead = []string{
@ -16,8 +17,8 @@ var defaultWrite = []string{
}
// NewBuilder build a environment builder
-func NewBuilder(c Config) (pool.EnvBuilder, error) {
+func NewBuilder(c Config, logger *zap.Logger) (pool.EnvBuilder, map[string]any, error) {
b := macsandbox.NewBuilder("", defaultRead, defaultWrite, c.NetShare)
-c.Info("created mac sandbox at", "")
-return b, nil
+logger.Info("created mac sandbox")
+return b, map[string]any{}, nil
}

328
env/env_linux.go vendored

@ -1,16 +1,23 @@
package env
import (
-"io/ioutil"
+"context"
+"fmt"
"os"
"sync/atomic"
"syscall"
-"github.com/criyle/go-judge/pkg/pool"
+"github.com/criyle/go-judge/env/linuxcontainer"
+"github.com/criyle/go-judge/env/pool"
+"github.com/criyle/go-judge/envexec"
"github.com/criyle/go-sandbox/container"
"github.com/criyle/go-sandbox/pkg/cgroup"
"github.com/criyle/go-sandbox/pkg/forkexec"
"github.com/criyle/go-sandbox/pkg/mount"
+"github.com/criyle/go-sandbox/runner"
+"github.com/google/shlex"
+"go.uber.org/zap"
+"golang.org/x/sys/unix"
)
const (
@ -21,111 +28,286 @@
)
// NewBuilder build a environment builder
-func NewBuilder(c Config) (pool.EnvBuilder, error) {
-root, err := ioutil.TempDir("", "executorserver")
-if err != nil {
-return nil, err
-}
-c.Info("Created tmp dir for container root at:", root)
-var mb *mount.Builder
-mc, err := readMountConfig(c.MountConf)
-if err != nil {
-if !os.IsNotExist(err) {
-return nil, err
-}
-c.Info("Mount.yaml(", c.MountConf, ") does not exists, use the default container mount")
-mb = getDefaultMount(c.TmpFsParam)
-} else {
-mb, err = parseMountConfig(mc)
-if err != nil {
-return nil, err
-}
-}
-m := mb.FilterNotExist().Mounts
-c.Info("Created container mount at:", mb)
-unshareFlags := uintptr(forkexec.UnshareFlags)
-if c.NetShare {
-unshareFlags ^= syscall.CLONE_NEWNET
-}
-// use setuid container only if running in root privilege
-var credGen container.CredGenerator
-if os.Getuid() == 0 {
-cred := c.ContainerCredStart
-if cred == 0 {
-cred = containerCredStart
-}
-credGen = newCredGen(uint32(cred))
-}
-hostName := containerName
-domainName := containerName
-workDir := defaultWorkDir
-cUID := containerCred
-cGID := containerCred
-if mc != nil {
-hostName = mc.HostName
-domainName = mc.DomainName
-workDir = mc.WorkDir
-cUID = mc.UID
-cGID = mc.GID
-}
-c.Info("Creating container builder: hostName=", hostName, ", domainName=", domainName, ", workDir=", workDir)
-b := &container.Builder{
-Root: root,
-Mounts: m,
-CredGenerator: credGen,
-Stderr: os.Stderr,
-CloneFlags: unshareFlags,
-ExecFile: c.ContainerInitPath,
-HostName: hostName,
-DomainName: domainName,
-WorkDir: workDir,
-ContainerUID: cUID,
-ContainerGID: cGID,
-}
-cgb := cgroup.NewBuilder(c.CgroupPrefix).WithCPUAcct().WithMemory().WithPids()
-if c.Cpuset != "" {
-cgb = cgb.WithCPUSet()
-}
-if c.EnableCPURate {
-cgb = cgb.WithCPU()
-}
-cgb, err = cgb.FilterByEnv()
-if err != nil {
-return nil, err
-}
-c.Info("Test created cgroup builder with:", cgb)
-if cg, err := cgb.Build(); err != nil {
-c.Warn("Tested created cgroup with error", err)
-c.Warn("Failed back to rlimit / rusage mode")
-cgb = nil
-} else {
-cg.Destroy()
-}
-var cgroupPool pool.CgroupPool
-if cgb != nil {
-cgroupPool = pool.NewFakeCgroupPool(cgb, c.CPUCfsPeriod)
-}
-return pool.NewEnvBuilder(b, cgroupPool, workDir, c.Cpuset, c.EnableCPURate), nil
-}
+func NewBuilder(c Config, logger *zap.Logger) (pool.EnvBuilder, map[string]any, error) {
+mountsConfig, mountBuilder, symbolicLinks, maskPaths, err := prepareMountAndPaths(c, logger)
+if err != nil {
+return nil, nil, err
+}
+m := mountBuilder.FilterNotExist().Mounts
+seccomp, err := prepareSeccomp(c, logger)
+if err != nil {
+return nil, nil, err
+}
+unshareFlags, unshareCgroup := prepareUnshareFlags(c, logger)
+credGen := prepareCredGen(c)
+hostName, domainName, workDir, cUID, cGID, initCmd, err := prepareContainerMeta(mountsConfig, logger)
+if err != nil {
+return nil, nil, err
+}
+b := &container.Builder{
+TmpRoot: "go-judge",
+Mounts: m,
+SymbolicLinks: symbolicLinks,
+MaskPaths: maskPaths,
+CredGenerator: credGen,
+Stderr: os.Stderr,
+CloneFlags: unshareFlags,
+ExecFile: c.ContainerInitPath,
+HostName: hostName,
+DomainName: domainName,
+InitCommand: initCmd,
+WorkDir: workDir,
+ContainerUID: cUID,
+ContainerGID: cGID,
+UnshareCgroupBeforeExec: unshareCgroup,
+}
+cgb, ct, err := setupCgroup(c, logger)
+if err != nil {
+return nil, nil, err
+}
+cgroupPool := prepareCgroupPool(cgb, c)
+cgroupType, cgroupControllers := getCgroupInfo(cgb, ct)
+conf := map[string]any{
+"cgroupType": cgroupType,
+"mount": m,
+"symbolicLink": symbolicLinks,
+"maskedPaths": maskPaths,
+"hostName": hostName,
+"domainName": domainName,
+"workDir": workDir,
+"uid": cUID,
+"gid": cGID,
+"cgroupControllers": cgroupControllers,
+}
+if tryClone3Builder := tryClone3(c, b, cgb, cgroupType, cgroupPool, workDir, seccomp, logger); tryClone3Builder != nil {
+conf["clone3"] = true
+return tryClone3Builder, conf, nil
+}
+return linuxcontainer.NewEnvBuilder(linuxcontainer.Config{
+Builder: b,
+CgroupPool: cgroupPool,
+WorkDir: workDir,
+Cpuset: c.Cpuset,
+CPURate: c.EnableCPURate,
+Seccomp: seccomp,
+}), conf, nil
+}
+func prepareMountAndPaths(c Config, logger *zap.Logger) (*Mounts, *mount.Builder, []container.SymbolicLink, []string, error) {
+mc, err := readMountConfig(c.MountConf)
+if err != nil {
+if !os.IsNotExist(err) {
+logger.Error("failed to read mount config", zap.String("path", c.MountConf), zap.Error(err))
+return nil, nil, nil, nil, err
+}
+logger.Info("mount.yaml does not exist, using default container mount", zap.String("path", c.MountConf))
+return nil, getDefaultMount(c.TmpFsParam), defaultSymLinks, defaultMaskPaths, nil
+}
+mountBuilder, err := parseMountConfig(mc)
+if err != nil {
+logger.Error("failed to parse mount config", zap.Error(err))
+return nil, nil, nil, nil, err
+}
+var symbolicLinks []container.SymbolicLink
+if len(mc.SymLinks) > 0 {
+symbolicLinks = make([]container.SymbolicLink, 0, len(mc.SymLinks))
+for _, l := range mc.SymLinks {
+symbolicLinks = append(symbolicLinks, container.SymbolicLink{LinkPath: l.LinkPath, Target: l.Target})
+}
+} else {
+symbolicLinks = defaultSymLinks
+}
+maskPaths := defaultMaskPaths
+if len(mc.MaskPaths) > 0 {
+maskPaths = mc.MaskPaths
+}
+logger.Info("created container mount", zap.Any("mountBuilder", mountBuilder))
+return mc, mountBuilder, symbolicLinks, maskPaths, nil
+}
+func prepareSeccomp(c Config, logger *zap.Logger) ([]syscall.SockFilter, error) {
+seccomp, err := readSeccompConf(c.SeccompConf)
+if err != nil {
+logger.Error("failed to load seccomp config", zap.String("path", c.SeccompConf), zap.Error(err))
+return nil, fmt.Errorf("failed to load seccomp config: %w", err)
+}
+if seccomp != nil {
+logger.Info("loaded seccomp filter", zap.String("path", c.SeccompConf))
+}
+return seccomp, nil
+}
+func prepareUnshareFlags(c Config, logger *zap.Logger) (uintptr, bool) {
+unshareFlags := uintptr(forkexec.UnshareFlags)
+if c.NetShare {
+unshareFlags ^= syscall.CLONE_NEWNET
+}
+major, minor := kernelVersion()
+unshareFlags ^= unix.CLONE_NEWCGROUP
+unshareCgroup := true
+if major < 4 || (major == 4 && minor < 6) {
+unshareCgroup = false
+logger.Info("kernel version < 4.6, don't unshare cgroup", zap.Int("major", major), zap.Int("minor", minor))
+}
+return unshareFlags, unshareCgroup
+}
+func prepareCredGen(c Config) container.CredGenerator {
+if os.Getuid() == 0 && c.ContainerCredStart > 0 {
+return newCredGen(uint32(c.ContainerCredStart))
+}
+return nil
+}
+func prepareContainerMeta(mc *Mounts, logger *zap.Logger) (hostName, domainName, workDir string, cUID, cGID int, initCmd []string, err error) {
+hostName = containerName
+domainName = containerName
+workDir = defaultWorkDir
+cUID = containerCred
+cGID = containerCred
+if mc != nil {
+if mc.HostName != "" {
+hostName = mc.HostName
+}
+if mc.DomainName != "" {
+domainName = mc.DomainName
+}
+if mc.WorkDir != "" {
+workDir = mc.WorkDir
+}
+if mc.UID != 0 {
+cUID = mc.UID
+}
+if mc.GID != 0 {
+cGID = mc.GID
+}
+if mc.InitCmd != "" {
+initCmd, err = shlex.Split(mc.InitCmd)
+if err != nil {
+logger.Error("failed to parse init_cmd", zap.String("init_cmd", mc.InitCmd), zap.Error(err))
+err = fmt.Errorf("failed to parse initCmd: %s %w", mc.InitCmd, err)
+return
+}
+logger.Info("initialize container with command", zap.String("init_cmd", mc.InitCmd))
+}
+}
+logger.Info("creating container builder",
+zap.String("host_name", hostName),
+zap.String("domain_name", domainName),
+zap.String("work_dir", workDir),
+)
+return
+}
+func tryClone3(
+c Config,
+envBuilder linuxcontainer.EnvironmentBuilder,
+cgb cgroup.Cgroup,
+cgroupType int,
+cgroupPool linuxcontainer.CgroupPool,
+workDir string,
+seccomp []syscall.SockFilter,
+logger *zap.Logger,
+) pool.EnvBuilder {
+major, minor := kernelVersion()
+if cgb == nil || cgroupType != cgroup.TypeV2 || (major < 5 || (major == 5 && minor < 7)) {
+return nil
+}
+logger.Info("running kernel >= 5.7 with cgroup V2, trying faster clone3(CLONE_INTO_CGROUP)",
+zap.Int("major", major), zap.Int("minor", minor))
+b := linuxcontainer.NewEnvBuilder(linuxcontainer.Config{
+Builder: envBuilder,
+CgroupPool: cgroupPool,
+WorkDir: workDir,
+Cpuset: c.Cpuset,
+CPURate: c.EnableCPURate,
+Seccomp: seccomp,
+CgroupFd: true,
+})
+e, err := b.Build()
+if err != nil {
+logger.Info("environment build failed", zap.Error(err))
+return nil
+}
+defer e.Destroy()
+p, err := e.Execve(context.TODO(), envexec.ExecveParam{
+Args: []string{"/usr/bin/env"},
+Limit: envexec.Limit{
+Memory: 256 << 20,
+Proc: 1,
+},
+})
+if err != nil {
+logger.Info("environment run failed", zap.Error(err))
+return nil
+}
+<-p.Done()
+r := p.Result()
+if r.Status == runner.StatusRunnerError {
+logger.Info("environment result failed", zap.Stringer("result", r))
+return nil
+}
+return b
+}
type credGen struct {
-cur uint32
+cur atomic.Uint32
}
func newCredGen(start uint32) *credGen {
-return &credGen{cur: start}
+rt := &credGen{}
+rt.cur.Store(start)
+return rt
}
func (c *credGen) Get() syscall.Credential {
-n := atomic.AddUint32(&c.cur, 1)
+n := c.cur.Add(1)
return syscall.Credential{
Uid: n,
Gid: n,
}
}
+func kernelVersion() (major int, minor int) {
+var uname syscall.Utsname
+if err := syscall.Uname(&uname); err != nil {
+return
+}
+rl := uname.Release
+var values [2]int
+vi := 0
+value := 0
+for _, c := range rl {
+if '0' <= c && c <= '9' {
+value = (value * 10) + int(c-'0')
+} else {
+// Note that we're assuming N.N.N here. If we see anything else we are likely to
+// misparse it.
+values[vi] = value
+vi++
+if vi >= len(values) {
+break
+}
+value = 0
+}
+}
+switch vi {
+case 0:
+return 0, 0
+case 1:
+return values[0], 0
+case 2:
+return values[0], values[1]
+}
+return
+}

9
env/env_others.go vendored

@ -1,4 +1,4 @@
-// +build !windows,!linux,!darwin
+//go:build !windows && !linux && !darwin
package env
@ -6,9 +6,10 @@ import (
"errors"
"runtime"
-"github.com/criyle/go-judge/pkg/pool"
+"github.com/criyle/go-judge/env/pool"
+"go.uber.org/zap"
)
-func NewBuilder(c Config) (pool.EnvBuilder, error) {
-return nil, errors.New("environment is not support on this platform" + runtime.GOOS)
+func NewBuilder(c Config, logger *zap.Logger) (pool.EnvBuilder, map[string]any, error) {
+return nil, nil, errors.New("environment is not support on this platform" + runtime.GOOS)
}

13
env/env_windows.go vendored

@ -1,16 +1,17 @@
package env
import (
-"github.com/criyle/go-judge/pkg/pool"
-"github.com/criyle/go-judge/pkg/winc"
+"github.com/criyle/go-judge/env/pool"
+"github.com/criyle/go-judge/env/winc"
+"go.uber.org/zap"
)
// NewBuilder build a environment builder
-func NewBuilder(c Config) (pool.EnvBuilder, error) {
+func NewBuilder(c Config, logger *zap.Logger) (pool.EnvBuilder, map[string]any, error) {
b, err := winc.NewBuilder("")
if err != nil {
-return nil, err
+return nil, nil, err
}
-c.Info("created winc builder")
-return b, nil
+logger.Info("created winc builder")
+return b, map[string]any{}, nil
}


@ -0,0 +1,98 @@
package linuxcontainer
import (
"os"
"sync"
"time"
)
var _ CgroupPool = &CachedCgroupPool{}
// CachedCgroupPool implements CgroupPool by keeping a small cache of pre-created cgroups; returned cgroups are destroyed rather than reused
type CachedCgroupPool struct {
builder CgroupBuilder
cfsPeriod time.Duration
cache chan Cgroup
destroy chan Cgroup
done chan struct{}
wg sync.WaitGroup
err error
}
// NewCachedCgroupPool creates a CachedCgroupPool backed by workerCount background workers
func NewCachedCgroupPool(builder CgroupBuilder, cfsPeriod time.Duration, workerCount int) CgroupPool {
p := &CachedCgroupPool{
builder: builder,
cfsPeriod: cfsPeriod,
cache: make(chan Cgroup, 4),
destroy: make(chan Cgroup, 4),
done: make(chan struct{}),
}
p.wg.Add(workerCount)
for i := 0; i < workerCount; i++ {
go p.loop()
}
return p
}
func (p *CachedCgroupPool) loop() {
defer p.wg.Done()
var cache Cgroup
for {
if cache == nil {
cg, err := p.builder.Random("")
if err != nil {
p.err = err
close(p.done)
return
}
cache = &wCgroup{cg: cg, cfsPeriod: p.cfsPeriod}
}
select {
case <-p.done:
return
case c := <-p.destroy:
c.Destroy()
case p.cache <- cache:
cache = nil
}
}
}
// Get gets new cgroup
func (p *CachedCgroupPool) Get() (Cgroup, error) {
select {
case <-p.done:
if p.err == nil {
return nil, os.ErrClosed
}
return nil, p.err
case c := <-p.cache:
return c, nil
}
}
// Put hands the cgroup to a worker for destruction
func (p *CachedCgroupPool) Put(c Cgroup) {
p.destroy <- c
}
// Shutdown stops the background workers and destroys all remaining cgroups
func (p *CachedCgroupPool) Shutdown() {
close(p.done)
p.wg.Wait()
// drain all cgroups
close(p.cache)
for c := range p.cache {
c.Destroy()
}
close(p.destroy)
for c := range p.destroy {
c.Destroy()
}
}


@ -1,4 +1,4 @@
-package pool
+package linuxcontainer
import "time"
@ -17,14 +17,14 @@ func NewFakeCgroupPool(builder CgroupBuilder, cfsPeriod time.Duration) CgroupPoo
// Get gets new cgroup
func (f *FakeCgroupPool) Get() (Cgroup, error) {
-cg, err := f.builder.Build()
+cg, err := f.builder.Random("")
if err != nil {
return nil, err
}
return &wCgroup{cg: cg, cfsPeriod: f.cfsPeriod}, nil
}
-// Put destory the cgroup
+// Put destroy the cgroup
func (f *FakeCgroupPool) Put(c Cgroup) {
c.Destroy()
}


@ -0,0 +1,70 @@
package linuxcontainer
import (
"os"
"time"
"github.com/criyle/go-judge/envexec"
"github.com/criyle/go-sandbox/pkg/cgroup"
)
var (
_ Cgroup = &wCgroup{}
)
type wCgroup struct {
cg cgroup.Cgroup
cfsPeriod time.Duration
}
func (c *wCgroup) SetCPURate(s uint64) error {
quota := time.Duration(uint64(c.cfsPeriod) * s / 1000)
return c.cg.SetCPUBandwidth(uint64(quota.Microseconds()), uint64(c.cfsPeriod.Microseconds()))
}
func (c *wCgroup) SetCpuset(s string) error {
return c.cg.SetCPUSet([]byte(s))
}
func (c *wCgroup) SetMemoryLimit(s envexec.Size) error {
return c.cg.SetMemoryLimit(uint64(s))
}
func (c *wCgroup) SetProcLimit(l uint64) error {
return c.cg.SetProcLimit(l)
}
func (c *wCgroup) CPUUsage() (time.Duration, error) {
t, err := c.cg.CPUUsage()
return time.Duration(t), err
}
func (c *wCgroup) CurrentMemory() (envexec.Size, error) {
s, err := c.cg.MemoryUsage()
return envexec.Size(s), err
}
func (c *wCgroup) MaxMemory() (envexec.Size, error) {
s, err := c.cg.MemoryMaxUsage()
return envexec.Size(s), err
}
func (c *wCgroup) ProcPeak() (uint64, error) {
return c.cg.ProcessPeak()
}
func (c *wCgroup) AddProc(pid int) error {
return c.cg.AddProc(pid)
}
func (c *wCgroup) Reset() error {
return nil
}
func (c *wCgroup) Destroy() error {
return c.cg.Destroy()
}
func (c *wCgroup) Open() (*os.File, error) {
return c.cg.Open()
}


@ -1,10 +1,11 @@
-package pool
+package linuxcontainer
import (
+"os"
"sync"
"time"
-"github.com/criyle/go-judge/pkg/envexec"
+"github.com/criyle/go-judge/envexec"
)
// Cgroup defines interface to limit and monitor resources consumption of a process
@ -12,14 +13,18 @@ type Cgroup interface {
SetCpuset(string) error
SetMemoryLimit(envexec.Size) error
SetProcLimit(uint64) error
-SetCPURate(float64) error
+SetCPURate(uint64) error // 1000 as 1
CPUUsage() (time.Duration, error)
-MemoryUsage() (envexec.Size, error)
+CurrentMemory() (envexec.Size, error)
+MaxMemory() (envexec.Size, error)
+ProcPeak() (uint64, error)
AddProc(int) error
Reset() error
Destroy() error
+Open() (*os.File, error)
}
// CgroupPool implements pool of Cgroup
@ -53,7 +58,7 @@ func (w *CgroupListPool) Get() (Cgroup, error) {
return rt, nil
}
-cg, err := w.builder.Build()
+cg, err := w.builder.Random("")
if err != nil {
return nil, err
}


@ -1,33 +1,49 @@
-package pool
+package linuxcontainer
import (
"fmt"
"syscall"
+"github.com/criyle/go-judge/env/pool"
"github.com/criyle/go-sandbox/container"
)
+// Config specifies configuration to build environment builder
+type Config struct {
+Builder EnvironmentBuilder
+CgroupPool CgroupPool
+WorkDir string
+Seccomp []syscall.SockFilter
+Cpuset string
+CPURate bool
+CgroupFd bool // whether to enable cgroup fd with clone3, kernel >= 5.7
+}
type environmentBuilder struct {
builder EnvironmentBuilder
cgPool CgroupPool
workDir string
+seccomp []syscall.SockFilter
cpuset string
cpuRate bool
+cgFd bool
}
// NewEnvBuilder creates builder for linux container pools
-func NewEnvBuilder(builder EnvironmentBuilder, cgPool CgroupPool, workDir, cpuset string, cpuRate bool) EnvBuilder {
+func NewEnvBuilder(c Config) pool.EnvBuilder {
return &environmentBuilder{
-builder: builder,
-cgPool: cgPool,
-workDir: workDir,
-cpuset: cpuset,
-cpuRate: cpuRate,
+builder: c.Builder,
+cgPool: c.CgroupPool,
+workDir: c.WorkDir,
+seccomp: c.Seccomp,
+cpuset: c.Cpuset,
+cpuRate: c.CPURate,
+cgFd: c.CgroupFd,
}
}
// Build creates linux container
-func (b *environmentBuilder) Build() (Environment, error) {
+func (b *environmentBuilder) Build() (pool.Environment, error) {
m, err := b.builder.Build()
if err != nil {
return nil, err
@ -44,7 +60,10 @@ func (b *environmentBuilder) Build() (Environment, error) {
Environment: m,
cgPool: b.cgPool,
wd: wd[0],
+workDir: b.workDir,
cpuset: b.cpuset,
cpuRate: b.cpuRate,
+seccomp: b.seccomp,
+cgFd: b.cgFd,
}, nil
}

242
env/linuxcontainer/environment_linux.go vendored Normal file

@ -0,0 +1,242 @@
package linuxcontainer
import (
"context"
"errors"
"fmt"
"os"
"path/filepath"
"syscall"
"time"
"github.com/criyle/go-judge/envexec"
"github.com/criyle/go-sandbox/container"
"github.com/criyle/go-sandbox/pkg/cgroup"
"github.com/criyle/go-sandbox/pkg/rlimit"
"github.com/criyle/go-sandbox/runner"
"golang.org/x/sys/unix"
)
var _ envexec.Environment = &environ{}
// environ defines interface to access container resources
type environ struct {
container.Environment
cgPool CgroupPool
wd *os.File // container work dir
workDir string
cpuset string
seccomp []syscall.SockFilter
cpuRate bool
cgFd bool
}
// Destroy destroys the environment
func (c *environ) Destroy() error {
return c.Environment.Destroy()
}
func (c *environ) Reset() error {
return c.Environment.Reset()
}
// Execve execute process inside the environment
func (c *environ) Execve(ctx context.Context, param envexec.ExecveParam) (envexec.Process, error) {
var (
cg Cgroup
syncFunc func(int) error
err error
cgFd uintptr
)
limit := param.Limit
if c.cgPool != nil {
cg, err = c.cgPool.Get()
if err != nil {
return nil, fmt.Errorf("execve: failed to get cgroup: %w", err)
}
if err := c.setCgroupLimit(cg, limit); err != nil {
return nil, err
}
if c.cgFd {
f, err := cg.Open()
if err != nil {
return nil, fmt.Errorf("execve: failed to get cgroup fd: %w", err)
}
defer f.Close()
cgFd = f.Fd()
} else {
syncFunc = cg.AddProc
}
}
rLimits := rlimit.RLimits{
CPU: uint64(limit.Time.Truncate(time.Second)/time.Second) + 1,
FileSize: limit.Output.Byte(),
Stack: limit.Stack.Byte(),
OpenFile: limit.OpenFile,
DisableCore: true,
}
if limit.DataSegment || c.cgPool == nil {
rLimits.Data = limit.Memory.Byte()
}
if limit.AddressSpace {
rLimits.AddressSpace = limit.Memory.Byte()
}
// wait for sync or error before returning (avoid closing files before they are passed to the child process)
syncDone := make(chan struct{})
p := container.ExecveParam{
Args: param.Args,
Env: param.Env,
Files: param.Files,
CTTY: param.TTY,
ExecFile: param.ExecFile,
RLimits: rLimits.PrepareRLimit(),
Seccomp: c.seccomp,
SyncFunc: func(pid int) error {
defer close(syncDone)
if syncFunc != nil {
return syncFunc(pid)
}
return nil
},
SyncAfterExec: syncFunc == nil,
CgroupFD: cgFd,
}
proc := newProcess(func() runner.Result {
return c.Environment.Execve(ctx, p)
}, cg, c.cgPool)
select {
case <-proc.done:
case <-syncDone:
}
return proc, nil
}
// WorkDir returns the opened work directory; the caller must not close it
func (c *environ) WorkDir() *os.File {
c.wd.Seek(0, 0)
return c.wd
}
// Open opens file relative to work directory
func (c *environ) Open(path string, flags int, perm os.FileMode) (*os.File, error) {
if filepath.IsAbs(path) {
var err error
path, err = filepath.Rel(c.workDir, path)
if err != nil {
return nil, fmt.Errorf("openatworkdir: %w", err)
}
}
fd, err := syscall.Openat(int(c.wd.Fd()), path, flags|syscall.O_CLOEXEC, uint32(perm))
if err != nil {
return nil, &os.PathError{Op: "open", Path: path, Err: err}
}
f := os.NewFile(uintptr(fd), path)
if f == nil {
return nil, fmt.Errorf("openatworkdir: failed to create file")
}
return f, nil
}
// MkdirAll equivalent to os.MkdirAll but in container
func (c *environ) MkdirAll(path string, perm os.FileMode) error {
if path == "" || path == "." {
return nil
}
if filepath.IsAbs(path) {
r, err := filepath.Rel(c.workDir, path)
if err != nil {
return &os.PathError{Op: "mkdir", Path: path, Err: syscall.EINVAL}
}
return c.MkdirAll(r, perm)
}
// fast path
wd := int(c.wd.Fd())
var stat unix.Stat_t
err := unix.Fstatat(wd, path, &stat, 0)
if err == nil {
if stat.Mode&syscall.S_IFMT == syscall.S_IFDIR {
return nil
}
return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR}
}
// slow path
// Slow path: make sure parent exists and then call Mkdir for path.
i := len(path)
for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator.
i--
}
j := i
for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element.
j--
}
if j > 1 {
// Create parent.
err = c.MkdirAll(path[:j-1], perm)
if err != nil {
return err
}
}
err = syscall.Mkdirat(wd, path, uint32(perm.Perm()))
if err != nil {
err1 := unix.Fstatat(wd, path, &stat, 0)
if err1 == nil && stat.Mode&syscall.S_IFMT == syscall.S_IFDIR {
return nil
}
return err
}
return nil
}
func (c *environ) Symlink(oldName, newName string) error {
var err error
if filepath.IsAbs(newName) {
newName, err = filepath.Rel(c.workDir, newName)
if err != nil {
return &os.PathError{Op: "symlink", Path: newName, Err: syscall.EINVAL}
}
}
if filepath.IsAbs(oldName) {
oldName, err = filepath.Rel(c.workDir, oldName)
if err != nil {
return &os.PathError{Op: "symlink", Path: oldName, Err: syscall.EINVAL}
}
}
return unix.Symlinkat(oldName, int(c.wd.Fd()), newName)
}
func (c *environ) setCgroupLimit(cg Cgroup, limit envexec.Limit) error {
cpuSet := limit.CPUSet
if cpuSet == "" {
cpuSet = c.cpuset
}
if cpuSet != "" {
if err := cg.SetCpuset(cpuSet); isCgroupSetHasError(err) {
return fmt.Errorf("execve: cgroup: failed to set cpuset limit: %w", err)
}
}
if c.cpuRate && limit.Rate > 0 {
if err := cg.SetCPURate(limit.Rate); isCgroupSetHasError(err) {
return fmt.Errorf("execve: cgroup: failed to set cpu rate limit: %w", err)
}
}
if err := cg.SetMemoryLimit(limit.Memory); isCgroupSetHasError(err) {
return fmt.Errorf("execve: cgroup: failed to set memory limit: %w", err)
}
if err := cg.SetProcLimit(limit.Proc); isCgroupSetHasError(err) {
return fmt.Errorf("execve: cgroup: failed to set process limit: %w", err)
}
return nil
}
func isCgroupSetHasError(err error) bool {
return err != nil && !errors.Is(err, cgroup.ErrNotInitialized) && !errors.Is(err, os.ErrNotExist)
}


@@ -1,9 +1,9 @@
-package pool
+package linuxcontainer

 import (
 	"time"

-	"github.com/criyle/go-judge/pkg/envexec"
+	"github.com/criyle/go-judge/envexec"
 	"github.com/criyle/go-sandbox/runner"
 )
@@ -16,7 +16,7 @@ type process struct {
 	cg Cgroup
 }

-func newProcess(ch <-chan runner.Result, cg Cgroup, cgPool CgroupPool) *process {
+func newProcess(run func() runner.Result, cg Cgroup, cgPool CgroupPool) *process {
 	p := &process{
 		done: make(chan struct{}),
 		cg:   cg,
@@ -26,19 +26,27 @@ func newProcess(ch <-chan runner.Result, cg Cgroup, cgPool CgroupPool) *process
 		if cgPool != nil {
 			defer cgPool.Put(cg)
 		}
-		p.rt = <-ch
-		if cg != nil {
-			if t, err := cg.CPUUsage(); err == nil {
-				p.rt.Time = t
-			}
-			if m, err := cg.MemoryUsage(); err == nil {
-				p.rt.Memory = m
-			}
-		}
+		p.rt = run()
+		p.collectUsage()
 	}()
 	return p
 }
+
+func (p *process) collectUsage() {
+	if p.cg == nil {
+		return
+	}
+	if t, err := p.cg.CPUUsage(); err == nil {
+		p.rt.Time = t
+	}
+	if m, err := p.cg.MaxMemory(); err == nil && m > 0 {
+		p.rt.Memory = m
+	}
+	if pp, err := p.cg.ProcPeak(); err == nil && pp > 0 {
+		p.rt.ProcPeak = pp
+	}
+}

 func (p *process) Done() <-chan struct{} {
 	return p.done
 }
@@ -55,7 +63,7 @@ func (p *process) Usage() envexec.Usage {
 	)
 	if p.cg != nil {
 		t, _ = p.cg.CPUUsage()
-		m, _ = p.cg.MemoryUsage()
+		m, _ = p.cg.CurrentMemory()
 	}
 	return envexec.Usage{
 		Time: t,


@@ -1,4 +1,4 @@
-package pool
+package linuxcontainer

 import (
 	"github.com/criyle/go-sandbox/container"
@@ -12,5 +12,5 @@ type EnvironmentBuilder interface {

 // CgroupBuilder builds cgroup for runner
 type CgroupBuilder interface {
-	Build() (cg *cgroup.Cgroup, err error)
+	Random(string) (cg cgroup.Cgroup, err error)
 }


@@ -1,10 +1,9 @@
 package macsandbox

 import (
-	"io/ioutil"
 	"os"

-	"github.com/criyle/go-judge/pkg/pool"
+	"github.com/criyle/go-judge/env/pool"
 )

 var _ pool.EnvBuilder = &Builder{}
@@ -28,7 +27,7 @@ func NewBuilder(wd string, readablePath, writablePath []string, network bool) po
 // Build create a sandbox environment
 func (b *Builder) Build() (pool.Environment, error) {
-	wd, err := ioutil.TempDir(b.wd, "es")
+	wd, err := os.MkdirTemp(b.wd, "es")
 	if err != nil {
 		return nil, err
 	}


@@ -3,12 +3,12 @@ package macsandbox
 import (
 	"context"
 	"os"
-	"path"
+	"path/filepath"
 	"syscall"
 	"time"

-	"github.com/criyle/go-judge/pkg/envexec"
-	"github.com/criyle/go-judge/pkg/pool"
+	"github.com/criyle/go-judge/env/pool"
+	"github.com/criyle/go-judge/envexec"
 	"github.com/criyle/go-sandbox/pkg/forkexec"
 	"github.com/criyle/go-sandbox/pkg/rlimit"
 	"github.com/criyle/go-sandbox/runner"
@@ -26,6 +26,7 @@ type environment struct {
 }

 func (e *environment) Execve(c context.Context, param envexec.ExecveParam) (envexec.Process, error) {
+	sTime := time.Now()
 	rLimits := rlimit.RLimits{
 		CPU:  uint64(param.Limit.Time.Truncate(time.Second)/time.Second) + 1,
 		Data: param.Limit.Memory.Byte(),
@@ -55,7 +56,8 @@ func (e *environment) Execve(c context.Context, param envexec.ExecveParam) (enve
 	go func() {
 		defer close(p.done)
-		var sTime, mTime, fTime time.Time
+		mTime := time.Now()
+
 		// handle cancel
 		ctx, cancel := context.WithCancel(c)
 		defer cancel()
@@ -80,12 +82,12 @@ func (e *environment) Execve(c context.Context, param envexec.ExecveParam) (enve
 			if err == syscall.EINTR {
 				continue
 			}
-			fTime = time.Now()
 			if err != nil {
 				p.result.Error = err.Error()
 				p.result.Status = runner.StatusRunnerError
 				return
 			}
+			fTime := time.Now()
 			p.result = runner.Result{
 				Status: runner.StatusNormal,
 				Time:   time.Duration(rusage.Utime.Nano()),
@@ -104,6 +106,7 @@ func (e *environment) Execve(c context.Context, param envexec.ExecveParam) (enve
 		case wstatus.Exited():
 			if status := wstatus.ExitStatus(); status != 0 {
 				p.result.Status = runner.StatusNonzeroExitStatus
+				p.result.ExitStatus = status
 				return
 			}
 			return
@@ -134,7 +137,15 @@ func (e *environment) WorkDir() *os.File {
 }

 func (e *environment) Open(p string, flags int, perm os.FileMode) (*os.File, error) {
-	return os.OpenFile(path.Join(e.wdPath, p), flags, perm)
+	return os.OpenFile(filepath.Join(e.wdPath, p), flags, perm)
+}
+
+func (e *environment) MkdirAll(p string, perm os.FileMode) error {
+	return os.MkdirAll(filepath.Join(e.wdPath, p), perm)
+}
+
+func (e *environment) Symlink(oldName, newName string) error {
+	return os.Symlink(oldName, filepath.Join(e.wdPath, newName))
 }

 func (e *environment) Destroy() error {
@@ -160,7 +171,7 @@ func removeContents(dir string) error {
 	}

 	for _, name := range names {
-		err = os.RemoveAll(path.Join(dir, name))
+		err = os.RemoveAll(filepath.Join(dir, name))
 		if err != nil {
 			return err
 		}


@@ -1,7 +1,7 @@
 package macsandbox

 import (
-	"github.com/criyle/go-judge/pkg/envexec"
+	"github.com/criyle/go-judge/envexec"
 	"github.com/criyle/go-sandbox/runner"
 )


@@ -24,31 +24,7 @@ const sandboxTemplate = `(version 1)
 (allow signal (target self))

 ; sysctls permitted.
-(allow sysctl-read
-  (sysctl-name "hw.activecpu")
-  (sysctl-name "hw.busfrequency_compat")
-  (sysctl-name "hw.byteorder")
-  (sysctl-name "hw.cachelinesize_compat")
-  (sysctl-name "hw.cpufrequency_compat")
-  (sysctl-name "hw.cputype")
-  (sysctl-name "hw.logicalcpu_max")
-  (sysctl-name "hw.machine")
-  (sysctl-name "hw.ncpu")
-  (sysctl-name "hw.pagesize_compat")
-  (sysctl-name "hw.physicalcpu_max")
-  (sysctl-name "hw.tbfrequency_compat")
-  (sysctl-name "hw.vectorunit")
-  (sysctl-name "kern.hostname")
-  (sysctl-name "kern.maxfilesperproc")
-  (sysctl-name "kern.osrelease")
-  (sysctl-name "kern.ostype")
-  (sysctl-name "kern.osvariant_status")
-  (sysctl-name "kern.osversion")
-  (sysctl-name "kern.usrstack64")
-  (sysctl-name "kern.version")
-  (sysctl-name "sysctl.proc_cputype")
-  (sysctl-name "kern.proc.pid.CURRENT_PID")
-)
+(allow sysctl-read)

 ; allow read from dir
 {{- range $dir := .ReadableDir }}
@@ -76,7 +52,7 @@ type Profile struct {
 	Network bool
 }

-// DefaultProfile defines the minimun default profile to run programs
+// DefaultProfile defines the minimum default profile to run programs
 var DefaultProfile = Profile{
 	ReadableDir: []string{"/usr/lib"},
 }
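For orientation, a minimal sketch of how this macOS sandbox builder is exercised, written inside the macsandbox package itself so no import paths have to be assumed; the argument values and the exampleBuild name are illustrative only and do not exist in the repository.

package macsandbox

// exampleBuild is illustrative only: it wires NewBuilder to Build and cleans
// up the resulting environment. Passing "" as wd lets Build create its
// temporary work directory under the system default temp dir.
func exampleBuild() error {
	b := NewBuilder("", []string{"/usr/lib"}, nil, false) // readable /usr/lib, nothing writable, no network
	env, err := b.Build()
	if err != nil {
		return err
	}
	defer env.Destroy()
	// env can now be handed to a pool / worker that drives Execve.
	return nil
}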

env/mount_linux.go (vendored): 74 changed lines

@@ -2,12 +2,12 @@ package env

 import (
 	"fmt"
-	"io/ioutil"
 	"os"
-	"path"
+	"path/filepath"

+	"github.com/criyle/go-sandbox/container"
 	"github.com/criyle/go-sandbox/pkg/mount"
-	"gopkg.in/yaml.v2"
+	"github.com/goccy/go-yaml"
 )

 // Mount defines single mount point configuration.
@@ -20,20 +20,30 @@ type Mount struct {
 	Data string `yaml:"data"`
 }

+// Link defines symlinks to be created after mounts
+type Link struct {
+	LinkPath string `yaml:"linkPath"`
+	Target   string `yaml:"target"`
+}
+
 // Mounts defines mount points for the container.
 type Mounts struct {
-	Mount      []Mount `yaml:"mount"`
-	WorkDir    string  `yaml:"workDir"`
-	HostName   string  `yaml:"hostName"`
-	DomainName string  `yaml:"domainName"`
-	UID        int     `yaml:"uid"`
-	GID        int     `yaml:"gid"`
-	Proc       bool    `yaml:"proc"`
+	Mount      []Mount  `yaml:"mount"`
+	SymLinks   []Link   `yaml:"symLink"`
+	MaskPaths  []string `yaml:"maskPath"`
+	InitCmd    string   `yaml:"initCmd"`
+	WorkDir    string   `yaml:"workDir"`
+	HostName   string   `yaml:"hostName"`
+	DomainName string   `yaml:"domainName"`
+	UID        int      `yaml:"uid"`
+	GID        int      `yaml:"gid"`
+	Proc       bool     `yaml:"proc"`
+	ProcRW     bool     `yaml:"procrw"`
 }

 func readMountConfig(p string) (*Mounts, error) {
 	var m Mounts
-	d, err := ioutil.ReadFile(p)
+	d, err := os.ReadFile(p)
 	if err != nil {
 		return nil, err
 	}
@@ -51,12 +61,12 @@ func parseMountConfig(m *Mounts) (*mount.Builder, error) {
 	}
 	for _, mt := range m.Mount {
 		target := mt.Target
-		if path.IsAbs(target) {
-			target = path.Clean(target[1:])
+		if filepath.IsAbs(target) {
+			target = filepath.Clean(target[1:])
 		}
 		source := mt.Source
-		if !path.IsAbs(source) {
-			source = path.Join(wd, source)
+		if !filepath.IsAbs(source) {
+			source = filepath.Join(wd, source)
 		}
 		switch mt.Type {
 		case "bind":
@@ -64,11 +74,11 @@ func parseMountConfig(m *Mounts) (*mount.Builder, error) {
 		case "tmpfs":
 			b.WithTmpfs(target, mt.Data)
 		default:
-			return nil, fmt.Errorf("Invalid mount type")
+			return nil, fmt.Errorf("invalid_mount_type: %v", mt.Type)
 		}
 	}
 	if m.Proc {
-		b.WithProc()
+		b.WithProcRW(m.ProcRW)
 	}
 	return b, nil
 }
@@ -80,6 +90,7 @@ func getDefaultMount(tmpFsConf string) *mount.Builder {
 		WithBind("/lib", "lib", true).
 		WithBind("/lib64", "lib64", true).
 		WithBind("/usr", "usr", true).
+		WithBind("/etc/ld.so.cache", "etc/ld.so.cache", true).
 		// java wants /proc/self/exe as it need relative path for lib
 		// however, /proc gives interface like /proc/1/fd/3 ..
 		// it is fine since open that file will be a EPERM
@@ -89,14 +100,43 @@ func getDefaultMount(tmpFsConf string) *mount.Builder {
 		WithBind("/etc/alternatives", "etc/alternatives", true).
 		// fpc wants /etc/fpc.cfg
 		WithBind("/etc/fpc.cfg", "etc/fpc.cfg", true).
+		// mono wants /etc/mono
+		WithBind("/etc/mono", "etc/mono", true).
 		// go wants /dev/null
 		WithBind("/dev/null", "dev/null", false).
 		// ghc wants /var/lib/ghc
 		WithBind("/var/lib/ghc", "var/lib/ghc", true).
 		// javaScript wants /dev/urandom
 		WithBind("/dev/urandom", "dev/urandom", false).
+		// additional devices
+		WithBind("/dev/random", "dev/random", false).
+		WithBind("/dev/zero", "dev/zero", false).
+		WithBind("/dev/full", "dev/full", false).
 		// work dir
 		WithTmpfs("w", tmpFsConf).
 		// tmp dir
 		WithTmpfs("tmp", tmpFsConf)
 }
+
+var defaultSymLinks = []container.SymbolicLink{
+	{LinkPath: "/dev/fd", Target: "/proc/self/fd"},
+	{LinkPath: "/dev/stdin", Target: "/proc/self/fd/0"},
+	{LinkPath: "/dev/stdout", Target: "/proc/self/fd/1"},
+	{LinkPath: "/dev/stderr", Target: "/proc/self/fd/2"},
+}
+
+var defaultMaskPaths = []string{
+	"/sys/firmware",
+	"/sys/devices/virtual/powercap",
+	"/proc/acpi",
+	"/proc/asound",
+	"/proc/kcore",
+	"/proc/keys",
+	"/proc/latency_stats",
+	"/proc/timer_list",
+	"/proc/timer_stats",
+	"/proc/sched_debug",
+	"/proc/scsi",
+	"/usr/lib/wsl/drivers",
+	"/usr/lib/wsl/lib",
+}
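The new Mounts fields map one-to-one onto keys of the mount configuration file (mount, symLink, maskPath, initCmd, proc, procrw, ...). A minimal sketch of an equivalent configuration expressed directly as these Go structs, placed inside package env so only the functions shown above are used; the concrete values are made up, and parseMountConfig only consumes the Mount entries and the Proc/ProcRW flags, while the symlink and mask entries are presumably applied when the container environment itself is assembled.

package env

// exampleMountConfig is illustrative only: it builds a Mounts value by hand
// instead of loading it from YAML via readMountConfig.
func exampleMountConfig() error {
	m := &Mounts{
		Mount: []Mount{
			{Type: "bind", Source: "/usr", Target: "usr"},                // bind host /usr into the container
			{Type: "tmpfs", Target: "w", Data: "size=128m,nr_inodes=4k"}, // tmpfs-backed work dir
		},
		SymLinks:  []Link{{LinkPath: "/dev/stdin", Target: "/proc/self/fd/0"}},
		MaskPaths: []string{"/proc/kcore"},
		WorkDir:   "/w",
		Proc:      true,
		ProcRW:    false, // keep /proc read-only
	}
	_, err := parseMountConfig(m)
	return err
}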

env/noseccomp_linux.go (vendored, new file): 10 lines

@@ -0,0 +1,10 @@
//go:build !seccomp

package env

import "syscall"

func readSeccompConf(name string) ([]syscall.SockFilter, error) {
	_ = name
	return nil, nil
}

env/pool/doc.go (vendored, new file): 2 lines

@@ -0,0 +1,2 @@
// Package pool provides reference implementation for envexec.EnvironmentPool from EnvBuilder
package pool


@@ -3,7 +3,8 @@ package pool
 import (
 	"sync"

-	"github.com/criyle/go-judge/pkg/envexec"
+	"github.com/criyle/go-judge/envexec"
+	"github.com/criyle/go-judge/worker"
 )

 // Environment defines envexec.Environment with destroy
@@ -26,7 +27,7 @@ type pool struct {
 }

 // NewPool returns a pool for EnvBuilder
-func NewPool(builder EnvBuilder) envexec.EnvironmentPool {
+func NewPool(builder EnvBuilder) worker.EnvironmentPool {
 	return &pool{
 		builder: builder,
 	}
@@ -45,11 +46,27 @@ func (p *pool) Get() (envexec.Environment, error) {
 }

 func (p *pool) Put(env envexec.Environment) {
-	e, _ := env.(Environment)
-	e.Reset()
+	e, ok := env.(Environment)
+	if !ok {
+		panic("invalid environment put")
+	}
+	// If contain died after execution, don't put it into pool and destroy it
+	if err := e.Reset(); err != nil {
+		e.Destroy()
+		return
+	}
 	p.mu.Lock()
 	defer p.mu.Unlock()
 	p.env = append(p.env, e)
 }
+
+func (p *pool) Destroy() {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	for _, e := range p.env {
+		e.Destroy()
+	}
+}
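A rough usage sketch of this pool, under the assumption that worker.EnvironmentPool exposes the Get/Put/Destroy methods implemented here; someEnvBuilder stands in for any pool.EnvBuilder (for example the macsandbox or winc builders) and is not a real identifier in the repository.

package main

import (
	"log"

	"github.com/criyle/go-judge/env/pool"
)

func runWithPool(someEnvBuilder pool.EnvBuilder) {
	p := pool.NewPool(someEnvBuilder)
	defer p.Destroy() // tears down every environment still held by the pool

	env, err := p.Get() // reuses a pooled environment or builds a fresh one
	if err != nil {
		log.Fatalln("get environment:", err)
	}

	// ... run something inside env via its Execve implementation ...

	p.Put(env) // if Reset succeeds the environment goes back into the pool; otherwise it is destroyed
}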

env/seccomp_linux.go (vendored, new file): 49 lines

@@ -0,0 +1,49 @@
//go:build seccomp

package env

import (
	"os"
	"syscall"

	"github.com/elastic/go-seccomp-bpf"
	"github.com/elastic/go-ucfg/yaml"
	"golang.org/x/net/bpf"
)

func readSeccompConf(name string) ([]syscall.SockFilter, error) {
	conf, err := yaml.NewConfigWithFile(name)
	if err != nil {
		if os.IsNotExist(err) {
			return nil, nil
		}
		return nil, err
	}
	var policy seccomp.Policy
	if err := conf.Unpack(&policy); err != nil {
		return nil, err
	}
	inst, err := policy.Assemble()
	if err != nil {
		return nil, err
	}
	rawInst, err := bpf.Assemble(inst)
	if err != nil {
		return nil, err
	}
	return toSockFilter(rawInst), nil
}

func toSockFilter(raw []bpf.RawInstruction) []syscall.SockFilter {
	filter := make([]syscall.SockFilter, 0, len(raw))
	for _, instruction := range raw {
		filter = append(filter, syscall.SockFilter{
			Code: instruction.Op,
			Jt:   instruction.Jt,
			Jf:   instruction.Jf,
			K:    instruction.K,
		})
	}
	return filter
}
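A small sketch of how the two build-tagged variants are meant to be consumed: under the seccomp tag the YAML policy is compiled to BPF, and in either variant a nil filter simply means no seccomp policy is applied. The file name and the loadSeccompFilter wrapper are illustrative, not part of the repository.

package env

import "log"

// loadSeccompFilter is illustrative only; it shows the calling convention of
// readSeccompConf: (nil, nil) when the file is absent or the binary was built
// without the seccomp tag, an error for an invalid policy.
func loadSeccompFilter() {
	filter, err := readSeccompConf("seccomp.yaml") // illustrative file name
	if err != nil {
		log.Fatalf("invalid seccomp policy: %v", err)
	}
	if filter == nil {
		log.Println("no seccomp filter configured")
		return
	}
	log.Printf("seccomp filter loaded: %d BPF instructions", len(filter))
}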


@@ -1,10 +1,9 @@
 package winc

 import (
-	"io/ioutil"
 	"os"

-	"github.com/criyle/go-judge/pkg/pool"
+	"github.com/criyle/go-judge/env/pool"
 	"golang.org/x/sys/windows"
 )
@@ -43,7 +42,7 @@ func (b *builder) Build() (pool.Environment, error) {
 	if err != nil {
 		return nil, err
 	}
-	workDir, err := ioutil.TempDir(b.root, wdPrefix)
+	workDir, err := os.MkdirTemp(b.root, wdPrefix)
 	if err != nil {
 		return nil, err
 	}
@@ -51,7 +50,7 @@ func (b *builder) Build() (pool.Environment, error) {
 		windows.LABEL_SECURITY_INFORMATION, nil, nil, nil, sacl); err != nil {
 		return nil, err
 	}
-	tmpDir, err := ioutil.TempDir(b.root, tmpPrefix)
+	tmpDir, err := os.MkdirTemp(b.root, tmpPrefix)
 	if err != nil {
 		return nil, err
 	}


@@ -7,13 +7,14 @@ import (
 	"errors"
 	"fmt"
 	"os"
-	"path"
+	"path/filepath"
 	"syscall"
+	"time"
 	"unicode/utf16"
 	"unsafe"

-	"github.com/criyle/go-judge/pkg/envexec"
-	"github.com/criyle/go-judge/pkg/pool"
+	"github.com/criyle/go-judge/env/pool"
+	"github.com/criyle/go-judge/envexec"
 	"github.com/criyle/go-sandbox/runner"
 	"golang.org/x/sys/windows"
 )
@@ -36,6 +37,7 @@ type Environment struct {

 // Execve implements windows sandbox ..
 func (e *Environment) Execve(ctx context.Context, param envexec.ExecveParam) (proc envexec.Process, err error) {
+	startTime := time.Now()
 	if len(param.Files) != 3 {
 		return nil, errFileCount
 	}
@@ -69,7 +71,10 @@
 	}
 	deskName := fmt.Sprintf("winc_%08x_%s", windows.GetCurrentProcessId(), hex.EncodeToString(random))
-	deskNameW := syscall.StringToUTF16Ptr(deskName)
+	deskNameW, err := syscall.UTF16PtrFromString(deskName)
+	if err != nil {
+		return nil, err
+	}

 	sa := windows.SecurityAttributes{
 		Length: uint32(unsafe.Sizeof(windows.SecurityAttributes{})),
@@ -106,8 +111,14 @@
 	}

 	cmdLine := makeCmdLine(param.Args)
-	cmdLineW := syscall.StringToUTF16Ptr(cmdLine)
-	dirW := syscall.StringToUTF16Ptr(e.root)
+	cmdLineW, err := syscall.UTF16PtrFromString(cmdLine)
+	if err != nil {
+		return nil, err
+	}
+	dirW, err := syscall.UTF16PtrFromString(e.root)
+	if err != nil {
+		return nil, err
+	}

 	var startupInfo syscall.StartupInfo
 	startupInfo.Cb = uint32(unsafe.Sizeof(startupInfo))
@@ -165,6 +176,7 @@
 		return nil, err
 	}
+	setUpTime := time.Now()

 	// resume thread
 	if _, err := windows.ResumeThread(windows.Handle(processInfo.Thread)); err != nil {
 		return nil, err
@@ -197,7 +209,8 @@
 		defer close(done)
 		var (
-			qty, key   uint32
+			qty        uint32
+			key        uintptr
 			overlapped *windows.Overlapped
 		)
 		result := runner.Result{
@@ -250,13 +263,15 @@
 		result.ExitStatus = int(exitCode)

 		// collect usage
-		t, m, err := getJobOjbectUsage(hJob)
+		t, m, err := getJobObjectUsage(hJob)
 		if err != nil {
 			procSet.result = runner.Result{Status: runner.StatusRunnerError, Error: err.Error()}
 			return
 		}
 		result.Time = t
 		result.Memory = m
+		result.SetUpTime = setUpTime.Sub(startTime)
+		result.RunningTime = time.Since(setUpTime)

 		procSet.result = result
 	}()
@@ -271,16 +286,30 @@
 // Open opens file related to root
 func (e *Environment) Open(p string, flags int, perm os.FileMode) (*os.File, error) {
-	return os.OpenFile(path.Join(e.root, p), flags, perm)
+	return os.OpenFile(filepath.Join(e.root, p), flags, perm)
+}
+
+func (e *Environment) MkdirAll(p string, perm os.FileMode) error {
+	return os.MkdirAll(filepath.Join(e.root, p), perm)
+}
+
+func (e *Environment) Symlink(oldName, newName string) error {
+	return os.Symlink(oldName, filepath.Join(e.root, newName))
 }

-// Destroy destorys the environment
+// Destroy destroys the environment
 func (e *Environment) Destroy() error {
+	// error is ignorable for destroy operation
+	os.RemoveAll(e.root)
+	os.RemoveAll(e.tmp)
 	return e.wd.Close()
 }

 // Reset remove all files in root directory
 func (e *Environment) Reset() error {
+	if err := removeContents(e.tmp); err != nil {
+		return err
+	}
 	return removeContents(e.root)
 }
@@ -298,7 +327,7 @@ func removeContents(dir string) error {
 	}
 	for _, name := range names {
-		err = os.RemoveAll(path.Join(dir, name))
+		err = os.RemoveAll(filepath.Join(dir, name))
 		if err != nil {
 			return err
 		}


@@ -4,7 +4,7 @@ import (
 	"time"
 	"unsafe"

-	"github.com/criyle/go-judge/pkg/envexec"
+	"github.com/criyle/go-judge/envexec"
 	"golang.org/x/sys/windows"
 )


@@ -4,7 +4,7 @@ import (
 	"time"
 	"unsafe"

-	"github.com/criyle/go-judge/pkg/envexec"
+	"github.com/criyle/go-judge/envexec"
 	"golang.org/x/sys/windows"
 )
@@ -27,14 +27,14 @@ func (p *process) Result() envexec.RunnerResult {
 }

 func (p *process) Usage() envexec.Usage {
-	t, m, _ := getJobOjbectUsage(p.hJob)
+	t, m, _ := getJobObjectUsage(p.hJob)
 	return envexec.Usage{
 		Time:   t,
 		Memory: m,
 	}
 }

-func getJobOjbectUsage(hJob windows.Handle) (time.Duration, envexec.Size, error) {
+func getJobObjectUsage(hJob windows.Handle) (time.Duration, envexec.Size, error) {
 	basicInfo := new(JOBOBJECT_BASIC_ACCOUNTING_INFORMATION)
 	if _, err := QueryInformationJobObject(hJob, JobObjectBasicAccountingInformation,
 		uintptr(unsafe.Pointer(basicInfo)), uint32(unsafe.Sizeof(*basicInfo)), nil); err != nil {


@@ -19,6 +19,7 @@ const (

 var (
 	errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
+	errERROR_EINVAL     error = syscall.EINVAL
 )

 // errnoErr returns common boxed Errno values, to prevent
@@ -26,7 +27,7 @@ var (
 func errnoErr(e syscall.Errno) error {
 	switch e {
 	case 0:
-		return nil
+		return errERROR_EINVAL
 	case errnoERROR_IO_PENDING:
 		return errERROR_IO_PENDING
 	}
@@ -38,32 +39,48 @@ func errnoErr(e syscall.Errno) error {
 var (
 	modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
-	moduser32   = windows.NewLazySystemDLL("user32.dll")
 	modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
+	moduser32   = windows.NewLazySystemDLL("user32.dll")

 	procCreateRestrictedToken     = modadvapi32.NewProc("CreateRestrictedToken")
-	procGetThreadDesktop          = moduser32.NewProc("GetThreadDesktop")
-	procGetProcessWindowStation   = moduser32.NewProc("GetProcessWindowStation")
-	procCreateDesktopW            = moduser32.NewProc("CreateDesktopW")
-	procCloseDesktop              = moduser32.NewProc("CloseDesktop")
 	procQueryInformationJobObject = modkernel32.NewProc("QueryInformationJobObject")
+	procCloseDesktop              = moduser32.NewProc("CloseDesktop")
+	procCreateDesktopW            = moduser32.NewProc("CreateDesktopW")
+	procGetProcessWindowStation   = moduser32.NewProc("GetProcessWindowStation")
+	procGetThreadDesktop          = moduser32.NewProc("GetThreadDesktop")
 )

 func CreateRestrictedToken(existingToken windows.Token, flags uint32, disableSidCount uint32, sidsToDisable *windows.SIDAndAttributes, deletePrivilegeCount uint32, privilegesToDelete *windows.SIDAndAttributes, restrictedSidCount uint32, sidToRestrict *windows.SIDAndAttributes, newTokenHandle *windows.Token) (err error) {
 	r1, _, e1 := syscall.Syscall9(procCreateRestrictedToken.Addr(), 9, uintptr(existingToken), uintptr(flags), uintptr(disableSidCount), uintptr(unsafe.Pointer(sidsToDisable)), uintptr(deletePrivilegeCount), uintptr(unsafe.Pointer(privilegesToDelete)), uintptr(restrictedSidCount), uintptr(unsafe.Pointer(sidToRestrict)), uintptr(unsafe.Pointer(newTokenHandle)))
 	if r1 == 0 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
+		err = errnoErr(e1)
 	}
 	return
 }

-func GetThreadDesktop(threadID uint32) (h HDESK) {
-	r0, _, _ := syscall.Syscall(procGetThreadDesktop.Addr(), 1, uintptr(threadID), 0, 0)
+func QueryInformationJobObject(job windows.Handle, JobObjectInformationClass uint32, JobObjectInformation uintptr, JobObjectInformationLength uint32, lpReturnLength *uint32) (ret int, err error) {
+	r0, _, e1 := syscall.Syscall6(procQueryInformationJobObject.Addr(), 5, uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), uintptr(unsafe.Pointer(lpReturnLength)), 0)
+	ret = int(r0)
+	if ret == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func CloseDesktop(hDesktop HDESK) (err error) {
+	r1, _, e1 := syscall.Syscall(procCloseDesktop.Addr(), 1, uintptr(hDesktop), 0, 0)
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func CreateDesktop(lpszDesktop *uint16, lpszDevice *uint16, pDevmode uintptr, dwFlags uint32, dwDesiredAccess windows.ACCESS_MASK, lpsa *windows.SecurityAttributes) (h HDESK, err error) {
+	r0, _, e1 := syscall.Syscall6(procCreateDesktopW.Addr(), 6, uintptr(unsafe.Pointer(lpszDesktop)), uintptr(unsafe.Pointer(lpszDevice)), uintptr(pDevmode), uintptr(dwFlags), uintptr(dwDesiredAccess), uintptr(unsafe.Pointer(lpsa)))
 	h = HDESK(r0)
+	if h == 0 {
+		err = errnoErr(e1)
+	}
 	return
 }
@@ -73,40 +90,8 @@ func GetProcessWindowStation() (h HWINSTA) {
 	return
 }

-func CreateDesktop(lpszDesktop *uint16, lpszDevice *uint16, pDevmode uintptr, dwFlags uint32, dwDesiredAccess windows.ACCESS_MASK, lpsa *windows.SecurityAttributes) (h HDESK, err error) {
-	r0, _, e1 := syscall.Syscall6(procCreateDesktopW.Addr(), 6, uintptr(unsafe.Pointer(lpszDesktop)), uintptr(unsafe.Pointer(lpszDevice)), uintptr(pDevmode), uintptr(dwFlags), uintptr(dwDesiredAccess), uintptr(unsafe.Pointer(lpsa)))
+func GetThreadDesktop(threadID uint32) (h HDESK) {
+	r0, _, _ := syscall.Syscall(procGetThreadDesktop.Addr(), 1, uintptr(threadID), 0, 0)
 	h = HDESK(r0)
-	if h == 0 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return
-}
-
-func CloseDesktop(hDesktop HDESK) (err error) {
-	r1, _, e1 := syscall.Syscall(procCloseDesktop.Addr(), 1, uintptr(hDesktop), 0, 0)
-	if r1 == 0 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return
-}
-
-func QueryInformationJobObject(job windows.Handle, JobObjectInformationClass uint32, JobObjectInformation uintptr, JobObjectInformationLength uint32, lpReturnLength *uint32) (ret int, err error) {
-	r0, _, e1 := syscall.Syscall6(procQueryInformationJobObject.Addr(), 5, uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), uintptr(unsafe.Pointer(lpReturnLength)), 0)
-	ret = int(r0)
-	if ret == 0 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
 	return
 }

envexec/cmd.go (new file): 159 lines

@@ -0,0 +1,159 @@
package envexec

import (
	"context"
	"fmt"
	"os"
	"time"

	"github.com/criyle/go-sandbox/runner"
)

// Size represent data size in bytes
type Size = runner.Size

// RunnerResult represent process finish result
type RunnerResult = runner.Result

// Cmd defines instruction to run a program in container environment
type Cmd struct {
	Environment Environment

	// file contents to copyin before exec
	CopyIn map[string]File

	// symbolic link to be created before exec
	SymLinks map[string]string

	// exec argument, environment
	Args []string
	Env  []string

	// Files for the executing command
	Files []File
	TTY   bool // use pty as input / output

	// resource limits
	TimeLimit        time.Duration
	MemoryLimit      Size
	StackLimit       Size
	ExtraMemoryLimit Size
	OutputLimit      Size
	ProcLimit        uint64
	OpenFileLimit    uint64
	CPURateLimit     uint64
	CPUSetLimit      string

	// Waiter is called after cmd starts and it should return
	// once time limit exceeded.
	// return true to as TLE and false as normal exits (context finished)
	Waiter func(context.Context, Process) bool

	// file names to copyout after exec
	CopyOut         []CmdCopyOutFile
	CopyOutMax      Size // file size limit
	CopyOutTruncate bool

	// CopyOutDir specifies a dir to dump all /w content
	CopyOutDir string

	// additional memory option
	AddressSpaceLimit bool
	DataSegmentLimit  bool
}
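The Waiter hook above is the one field whose contract is easy to get backwards, so here is a minimal sketch of an implementation, assuming only the signature shown in the struct; the wall-clock budget and the wallClockWaiter helper are illustrative, not part of the repository.

package main

import (
	"context"
	"time"

	"github.com/criyle/go-judge/envexec"
)

// wallClockWaiter returns a Cmd.Waiter that reports TLE once an illustrative
// wall-clock budget elapses, and a normal exit when the context finishes first.
func wallClockWaiter(budget time.Duration) func(context.Context, envexec.Process) bool {
	return func(ctx context.Context, _ envexec.Process) bool {
		select {
		case <-time.After(budget):
			return true // treated as time limit exceeded
		case <-ctx.Done():
			return false // process already finished (context done)
		}
	}
}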
// CmdCopyOutFile defines the file to be copy out after cmd execution
type CmdCopyOutFile struct {
	Name     string // Name is the file out to copyOut
	Optional bool   // Optional ignores the file if not exists
}

// Result defines the running result for single Cmd
type Result struct {
	Status Status

	ExitStatus int

	Error string // error

	Time     time.Duration
	RunTime  time.Duration
	Memory   Size   // byte
	ProcPeak uint64 // maximum processes ever running

	// Files stores copy out files
	Files map[string]*os.File

	// FileError stores file errors details
	FileError []FileError
}

// FileErrorType defines the location that file operation fails
type FileErrorType int

// FileError enums
const (
	ErrCopyInOpenFile FileErrorType = iota
	ErrCopyInCreateDir
	ErrCopyInCreateFile
	ErrCopyInCopyContent
	ErrCopyOutOpen
	ErrCopyOutNotRegularFile
	ErrCopyOutSizeExceeded
	ErrCopyOutCreateFile
	ErrCopyOutCopyContent
	ErrCollectSizeExceeded
	ErrSymlink
)

// FileError defines the location, file name and the detailed message for a failed file operation
type FileError struct {
	Name    string        `json:"name"`
	Type    FileErrorType `json:"type"`
	Message string        `json:"message,omitempty"`
}

var fileErrorString = []string{
	"CopyInOpenFile",
	"CopyInCreateDir",
	"CopyInCreateFile",
	"CopyInCopyContent",
	"CopyOutOpen",
	"CopyOutNotRegularFile",
	"CopyOutSizeExceeded",
	"CopyOutCreateFile",
	"CopyOutCopyContent",
	"CollectSizeExceeded",
}

var fileErrorStringReverse = make(map[string]FileErrorType)

func (t FileErrorType) String() string {
	v := int(t)
	if v >= 0 && v < len(fileErrorString) {
		return fileErrorString[v]
	}
	return ""
}

// MarshalJSON encodes file error into json string
func (t FileErrorType) MarshalJSON() ([]byte, error) {
	return []byte(`"` + t.String() + `"`), nil
}

// UnmarshalJSON decodes file error from json string
func (t *FileErrorType) UnmarshalJSON(b []byte) error {
	str := string(b)
	v, ok := fileErrorStringReverse[str]
	if !ok {
		return fmt.Errorf("%s is not file error type", str)
	}
	*t = v
	return nil
}

func init() {
	for i, v := range fileErrorString {
		fileErrorStringReverse[`"`+v+`"`] = FileErrorType(i)
	}
}
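Since FileErrorType round-trips through its string name, a FileError serializes into a readable JSON object. A quick sketch of what the methods above produce; the file name and message are made-up sample data.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/criyle/go-judge/envexec"
)

func main() {
	fe := envexec.FileError{
		Name:    "a.out",
		Type:    envexec.ErrCopyOutOpen,
		Message: "no such file or directory",
	}
	b, _ := json.Marshal(fe)
	// prints: {"name":"a.out","type":"CopyOutOpen","message":"no such file or directory"}
	fmt.Println(string(b))
}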


@@ -1,16 +1,16 @@
 // Package envexec provides utility function to run program in restricted environments
 // through container and cgroup.
 //
-// Cmd
+// # Cmd
 //
 // Cmd defines single program to run, including copyin files before exec, run the program and copy
 // out files after exec
 //
-// Single
+// ## Single
 //
 // Single defines single Cmd with Environment and Cgroup Pool
 //
-// Group
+// ## Group
 //
 // Group defines multiple Cmd with Environment and Cgroup Pool, together with Pipe mapping between
 // different Cmd

Some files were not shown because too many files have changed in this diff.