Merge v2 dev to main
Some checks failed
App Builder / build (ubuntu-latest) (push) Has been cancelled
App Builder / build (windows-latest) (push) Has been cancelled
App Builder / Deploy (push) Has been cancelled
App Builder / Cleanup actions (push) Has been cancelled

Push v2 app to main
Prohurtz 2025-04-03 11:54:11 -07:00 committed by GitHub
commit 240a5d464d
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
92 changed files with 273869 additions and 2608 deletions

BIN
.DS_Store vendored Normal file

Binary file not shown.

6
.coveragerc Normal file

@ -0,0 +1,6 @@
[run]
omit=
tests/*
conftest.py
source =
EyeTrackApp
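coverage.py reads this file automatically. A minimal sketch of driving it from Python (illustrative only; treating EyeTrackApp as an importable module is an assumption):

import coverage

cov = coverage.Coverage()  # picks up .coveragerc: source=EyeTrackApp, tests omitted
cov.start()
import EyeTrackApp  # hypothetical stand-in for the code under measurement
cov.stop()
cov.save()
cov.report()  # prints a summary restricted to the EyeTrackApp source tree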

48
.devcontainer/devcontainer.json Normal file

@ -0,0 +1,48 @@
// For format details, see https://aka.ms/devcontainer.json. For config options, see the
// README at: https://github.com/devcontainers/templates/tree/main/src/docker-existing-dockerfile
{
"name": "EyeTrackVR App Backend",
"build": {
// Sets the run context to one level up instead of the .devcontainer folder.
"context": "..",
// Update the 'dockerFile' property if you aren't using the standard 'Dockerfile' filename.
"dockerfile": "../Dockerfile"
},
"features": {
"ghcr.io/akhildevelops/devcontainer-features/apt:0": {},
"ghcr.io/devcontainers/features/python:1": {
"installTools": true,
"optimize": true,
"version": "3.10"
}
},
"customizations": {
"vscode": {
"settings": {},
"extensions": [
"streetsidesoftware.code-spell-checker",
"ms-python.python",
"ms-python.vscode-pylance",
"ms-azuretools.vscode-docker",
"wayou.vscode-todo-highlight",
"gruntfuggly.todo-tree",
"eamodio.gitlens",
"github.vscode-pull-request-github",
"rangav.vscode-thunder-client"
]
}
},
// Features to add to the dev container. More info: https://containers.dev/features.
// "features": {},
// Use 'forwardPorts' to make a list of ports inside the container available locally.
"forwardPorts": [
8000
],
// Uncomment the next line to run commands after the container is created.
"postCreateCommand": "make install && make"
// Configure tool-specific properties.
// "customizations": {},
// Uncomment to connect as an existing user other than the container default. More info: https://aka.ms/dev-containers-non-root.
// "remoteUser": "devcontainer"
}

27
.dockerignore Normal file

@ -0,0 +1,27 @@
**/__pycache__
**/.venv
**/.classpath
**/.dockerignore
**/.env
**/.git
**/.gitignore
**/.project
**/.settings
**/.toolstarget
**/.vs
**/.vscode
**/*.*proj.user
**/*.dbmdl
**/*.jfm
**/bin
**/charts
**/docker-compose*
**/compose*
**/Dockerfile*
**/node_modules
**/npm-debug.log
**/obj
**/secrets.dev.yaml
**/values.dev.yaml
LICENSE
README.md

64
.github/scripts/prepareCMD.sh vendored Normal file

@ -0,0 +1,64 @@
#!/bin/bash
# create a variable to hold a passed-in argument
# this argument is the next release version
# this is passed in from the .releaserc file
sudo apt-get install -y jq
nextReleaseVersion=$1
TARGET_KEY="version"
# parse all letters a-z and A-Z and replace with nothing
# this will remove all letters from the version string
# this is to ensure that the version string is a valid semver
# check if there is a letter in the version string
# if there is a letter, then remove it
# if there is no letter, then do nothing
if [[ $nextReleaseVersion =~ [a-zA-Z] ]]; then
nextReleaseVersion=$(echo $nextReleaseVersion | sed 's/[a-zA-Z]//g')
# check if there is a dash in the version string
# if there is a dash, then replace it with a dot
# if there is no dash, then do nothing
if [[ $nextReleaseVersion =~ "-" ]]; then
# parse all dashes and replace with dots
# this is to ensure that the version string is a valid semver
nextReleaseVersion=$(echo $nextReleaseVersion | sed 's/-/./g')
# remove everything after the third dot and the dot itself
# this is to ensure that the version string is a valid semver
nextReleaseVersion=$(echo $nextReleaseVersion | sed 's/\.[0-9]*$//g')
# remove the last dot
nextReleaseVersion=$(echo $nextReleaseVersion | sed 's/\.$//g')
fi
fi
# print the next release version
printf "[prepareCMD.sh]: Next version: ${nextReleaseVersion}\n"
# This script is used to execute the prepareCMD.sh script on the remote host
printf "[prepareCMD.sh]: Executing prepareCMD.sh on remote host \n"
#printf "Update the version in the Cargo.toml file \n"
#
#sed -i "s/version = \"[0-9\\.]*\"/version = \"${nextReleaseVersion}\"/g" ./GUI/ETVR/src-tauri/Cargo.toml
# Install the dependencies for toml file
printf "[prepareCMD.sh]: Installing the dependencies for the toml file \n"
pip3 install yq
export PATH="$HOME/.local/bin:$PATH"
source ~/.bashrc
tmp=$(mktemp)
tomlq -t --arg version "$nextReleaseVersion" '.tool.poetry.version |= $version' ./pyproject.toml > "$tmp" && mv -f "$tmp" ./pyproject.toml
# validate the Cargo.toml file
#printf "[prepareCMD.sh]: Validating the Cargo.toml file \n"
#cat ./GUI/ETVR/src-tauri/Cargo.toml
printf "[prepareCMD.sh]: Done, continuing with release. \n"


@ -1,29 +0,0 @@
name: Build and Deploy to Github Pages
on:
push:
branches: [docs]
# Allows you to run this workflow manually from the Actions tab
workflow_dispatch:
jobs:
build_and_deploy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
# Use GitHub Actions' cache to cache dependencies on servers
- uses: actions/cache@v2
with:
path: vendor/bundle
key: ${{ runner.os }}-gems-${{ hashFiles('**/Gemfile.lock') }}
restore-keys: |
${{ runner.os }}-gems-
# Use GitHub Deploy Action to build and deploy to Github
- uses: jeffreytse/jekyll-deploy-action@v0.3.1
with:
provider: 'github'
token: ${{ secrets.GITHUB_TOKEN }} # It's your Personal Access Token(PAT)
jekyll_src: './docs' # Default is root directory

129
.github/workflows/build.yml vendored Normal file

@ -0,0 +1,129 @@
name: App Builder
on:
workflow_dispatch:
push:
tags:
- "v*"
branches:
- main
- master
- HSF-and-new-algos-feature-branch
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
permissions:
contents: write
jobs:
build:
strategy:
matrix:
os: [ubuntu-latest, windows-latest]
defaults:
run:
shell: bash
runs-on: ${{ matrix.os }}
steps:
- name: Checkout repository
uses: actions/checkout@v3
with:
token: ${{ env.GITHUB_TOKEN }}
      - name: Set up Python
        id: setup-python
        uses: actions/setup-python@v4
        with:
          python-version: 3.10.11
- name: Setup Poetry
uses: snok/install-poetry@v1
with:
virtualenvs-create: true
virtualenvs-in-project: true
installer-parallel: true
- name: Load cached wheels
id: cached-pip-wheels
uses: actions/cache@v3
with:
path: ~/.cache
key: venv-${{ runner.os }}-${{ steps.setup-python.outputs.python-version }}-${{ hashFiles('**/poetry.lock') }}
#----------------------------------------------
# load cached venv if cache exists
#----------------------------------------------
- name: Load cached venv
id: cached-poetry-dependencies
uses: actions/cache@v3
with:
path: .venv
key: venv-${{ runner.os }}-${{ steps.setup-python.outputs.python-version }}-${{ hashFiles('**/poetry.lock') }}
- name: Install python dependencies
if: steps.cached-poetry-dependencies.outputs.cache-hit != 'true'
run: poetry install
- name: Install Linux dependencies
if: ${{ matrix.os == 'ubuntu-latest'}}
run: sudo apt-get install -y build-essential curl wget libssl-dev
- run: source $VENV
- name: Build App
run: poetry run pyinstaller --noconfirm EyeTrackApp/eyetrackapp.spec EyeTrackApp/eyetrackapp.py
- name: Rename Linux Binary
if: ${{ matrix.os == 'ubuntu-latest' }}
run: |
ls -a build
mv ./dist/eyetrackapp ./dist/eyetrackapp.bin
- uses: actions/upload-artifact@v2
with:
name: production-files
path: dist
deploy:
runs-on: ubuntu-latest
name: Deploy
needs: [build]
steps:
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Create Directory
run: mkdir -p dist
- name: Download artifact
uses: actions/download-artifact@v2
with:
name: production-files
path: ./dist
- name: Setup node
uses: actions/setup-node@v3
with:
node-version: 18
- run: npm install -g conventional-changelog-conventionalcommits@6
- run: npm install -g semantic-release@v19.0.5
- run: npm install -g @semantic-release/exec
- run: npm install -g @semantic-release/git
- run: npm install -g @semantic-release/release-notes-generator
- run: npm install -g @semantic-release/changelog
- run: npm install -g @semantic-release/github
- name: Release
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
chmod +x ./.github/scripts/prepareCMD.sh
semantic-release
cleanup:
name: Cleanup actions
needs:
- deploy
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- name: "♻️ remove build artifacts"
uses: geekyeggo/delete-artifact@v1
with:
name: production-files

92
.github/workflows/build_all_the_things.yml

@ -1,92 +0,0 @@
name: Tasmota CI
on:
workflow_dispatch:
pull_request:
branches: development
paths:
- '**.c'
- '**.cpp'
- '**.h'
- '**.hpp'
- '**.ino'
- '**.json'
- '**.properties'
- 'pio-tools/*.py'
- '**.ini'
- '.github/workflows/build_all_the_things.yml'
jobs:
base-images:
runs-on: ubuntu-latest
if: github.repository == 'arendst/Tasmota'
strategy:
matrix:
variant:
- tasmota
- tasmota4M
- tasmota-display
- tasmota-ir
- tasmota-knx
- tasmota-lite
- tasmota-minimal
- tasmota-sensors
- tasmota-zbbridge
- tasmota-zigbee
- tasmota32
- tasmota32-webcam
- tasmota32-bluetooth
- tasmota32-core2
- tasmota32-display
- tasmota32-ir
- tasmota32-lvgl
- tasmota32s2
- tasmota32c3
- tasmota32c3usb
- tasmota32solo1
- tasmota32solo1-safeboot
- tasmota32-safeboot
- tasmota32c3-safeboot
- tasmota32c3usb-safeboot
- tasmota32s2-safeboot
- tasmota32s3-safeboot
steps:
- uses: actions/checkout@v2
- name: Set up Python
uses: actions/setup-python@v1
- name: Install dependencies
run: |
#python -m pip install --upgrade pip
pip install -U platformio
#platformio upgrade --dev
#platformio update
- name: Run PlatformIO
run: platformio run -e ${{ matrix.variant }}
- uses: actions/upload-artifact@v2
with:
name: firmware
path: ./build_output
language-images:
runs-on: ubuntu-latest
if: github.repository == 'arendst/Tasmota'
strategy:
matrix:
variant: [ tasmota ]
language: [ AF, BG, BR, CN, CZ, DE, ES, FR, FY, GR, HE, HU, IT, KO, NL, PL, PT, RO, RU, SE, SK, TR, TW, UK, VN ]
steps:
- uses: actions/checkout@v2
- name: Set up Python
uses: actions/setup-python@v1
- name: Install dependencies
run: |
#python -m pip install --upgrade pip
pip install -U platformio
#platformio upgrade --dev
#platformio update
- name: Run PlatformIO
run: platformio run -e ${{ matrix.variant }}-${{ matrix.language }}
- uses: actions/upload-artifact@v2
with:
name: firmware
path: ./build_output


@ -1,162 +0,0 @@
name: Build_development
on:
workflow_dispatch: # Manually start a workflow
push:
branches: development
paths-ignore:
- '.github/**' # Ignore changes towards the .github directory
- '**.md' # Do not build if *.md files change
# Ensures that only one deploy task per branch/environment will run at a time.
concurrency:
group: environment-${{ github.ref }}
cancel-in-progress: true
jobs:
base-images:
runs-on: ubuntu-latest
if: github.repository == 'arendst/Tasmota'
continue-on-error: true
strategy:
matrix:
variant:
- tasmota
- tasmota4M
- tasmota-minimal
- tasmota-display
- tasmota-ir
- tasmota-knx
- tasmota-lite
- tasmota-sensors
- tasmota-zbbridge
- tasmota-zigbee
- tasmota32
- tasmota32-webcam
- tasmota32-bluetooth
- tasmota32-display
- tasmota32-ir
- tasmota32-lvgl
- tasmota32c3
- tasmota32c3usb
- tasmota32solo1
- tasmota32solo1-safeboot
- tasmota32-safeboot
- tasmota32c3-safeboot
- tasmota32c3usb-safeboot
- tasmota32s2-safeboot
- tasmota32s3-safeboot
steps:
- uses: actions/checkout@v3
with:
ref: development
- name: Set up Python
uses: actions/setup-python@v2
- name: Install dependencies
run: |
pip install -U platformio
- name: Run PlatformIO
run: platformio run -e ${{ matrix.variant }}
- uses: actions/upload-artifact@v2
with:
name: firmware
path: ./build_output
language-images:
runs-on: ubuntu-latest
if: github.repository == 'arendst/Tasmota'
continue-on-error: true
strategy:
matrix:
variant: [ tasmota, tasmota32 ]
language: [ AF, BG, BR, CN, CZ, DE, ES, FR, FY, GR, HE, HU, IT, KO, NL, PL, PT, RO, RU, SE, SK, TR, TW, UK, VN ]
steps:
- uses: actions/checkout@v3
with:
ref: development
- name: Set up Python
uses: actions/setup-python@v2
- name: Install dependencies
run: |
pip install -U platformio
- name: Run PlatformIO
run: platformio run -e ${{ matrix.variant }}-${{ matrix.language }}
- uses: actions/upload-artifact@v2
with:
name: firmware
path: ./build_output
Upload:
needs: [base-images, language-images]
runs-on: ubuntu-latest
continue-on-error: true
steps:
- uses: actions/download-artifact@v2
with:
name: firmware
path: ./mv_firmware
- name: Display structure of downloaded files
run: ls -R
working-directory: ./mv_firmware
- name: Move firmware files in sub-folders
run: |
mkdir -p ./firmware/tasmota/languages
mkdir -p ./firmware/tasmota32/languages
mkdir -p ./firmware/map
[ ! -f ./mv_firmware/map/* ] || mv ./mv_firmware/map/* ./firmware/map/
[ ! -f ./mv_firmware/firmware/tasmota.* ] || mv ./mv_firmware/firmware/tasmota.* ./firmware/tasmota/
[ ! -f ./mv_firmware/firmware/tasmota4M.* ] || mv ./mv_firmware/firmware/tasmota4M.* ./firmware/tasmota/
[ ! -f ./mv_firmware/firmware/tasmota-sensors.* ] || mv ./mv_firmware/firmware/tasmota-sensors.* ./firmware/tasmota/
[ ! -f ./mv_firmware/firmware/tasmota-minimal.bin.gz ] || mv ./mv_firmware/firmware/tasmota-minimal.bin.gz ./firmware/tasmota/
[ ! -f ./mv_firmware/firmware/tasmota-lite.* ] || mv ./mv_firmware/firmware/tasmota-lite.* ./firmware/tasmota/
[ ! -f ./mv_firmware/firmware/tasmota-ir*.* ] || mv ./mv_firmware/firmware/tasmota-ir*.* ./firmware/tasmota/
[ ! -f ./mv_firmware/firmware/tasmota-display.* ] || mv ./mv_firmware/firmware/tasmota-display.* ./firmware/tasmota/
[ ! -f ./mv_firmware/firmware/tasmota-knx.* ] || mv ./mv_firmware/firmware/tasmota-knx.* ./firmware/tasmota/
[ ! -f ./mv_firmware/firmware/tasmota-zbbridge.* ] || mv ./mv_firmware/firmware/tasmota-zbbridge.* ./firmware/tasmota/
[ ! -f ./mv_firmware/firmware/tasmota-zigbee.* ] || mv ./mv_firmware/firmware/tasmota-zigbee.* ./firmware/tasmota/
[ ! -f ./mv_firmware/firmware/tasmota32.* ] || mv ./mv_firmware/firmware/tasmota32.* ./firmware/tasmota32/
[ ! -f ./mv_firmware/firmware/tasmota32solo1*.* ] || mv ./mv_firmware/firmware/tasmota32solo1*.* ./firmware/tasmota32/
[ ! -f ./mv_firmware/firmware/tasmota32-ir*.* ] || mv ./mv_firmware/firmware/tasmota32-ir*.* ./firmware/tasmota32/
[ ! -f ./mv_firmware/firmware/tasmota32-display.* ] || mv ./mv_firmware/firmware/tasmota32-display.* ./firmware/tasmota32/
[ ! -f ./mv_firmware/firmware/tasmota32-lvgl.* ] || mv ./mv_firmware/firmware/tasmota32-lvgl.* ./firmware/tasmota32/
[ ! -f ./mv_firmware/firmware/tasmota32-web*.* ] || mv ./mv_firmware/firmware/tasmota32-web*.* ./firmware/tasmota32/
[ ! -f ./mv_firmware/firmware/tasmota32-odroidgo.* ] || mv ./mv_firmware/firmware/tasmota32-odroidgo.* ./firmware/tasmota32/
[ ! -f ./mv_firmware/firmware/tasmota32-core2.* ] || mv ./mv_firmware/firmware/tasmota32-core2.* ./firmware/tasmota32/
[ ! -f ./mv_firmware/firmware/tasmota32-bluetooth.* ] || mv ./mv_firmware/firmware/tasmota32-bluetooth.* ./firmware/tasmota32/
[ ! -f ./mv_firmware/firmware/tasmota32c3*.* ] || mv ./mv_firmware/firmware/tasmota32c3*.* ./firmware/tasmota32/
[ ! -f ./mv_firmware/firmware/tasmota32-safeboot* ] || mv ./mv_firmware/firmware/tasmota32-safeboot* ./firmware/tasmota32/
[ ! -f ./mv_firmware/firmware/tasmota32-* ] || mv ./mv_firmware/firmware/tasmota32-* ./firmware/tasmota32/languages/
[ ! -f ./mv_firmware/firmware/tasmota32* ] || mv ./mv_firmware/firmware/tasmota32* ./firmware/tasmota32/
[ ! -f ./mv_firmware/firmware/* ] || mv ./mv_firmware/firmware/* ./firmware/tasmota/languages/
- name: Display files to transfer
run: ls -R ./*
- name: Push Firmware files to tmp_copy repo
uses: Jason2866/copy_file_to_another_repo_action@main
env:
API_TOKEN_GITHUB: ${{ secrets.API_TOKEN_GITHUB }}
with:
source_file: 'firmware'
destination_repo: 'arendst/tmp_copy'
destination_branch: 'firmware'
user_email: 'github-actions@github.com'
user_name: 'github-actions'
Start_final_copy:
needs: Upload
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Create trigger.txt
run: |
echo ${GITHUB_SHA} &> trigger.txt
echo "$(<trigger.txt)"
- name: Push trigger.txt to start workflow copy in tmp repo
uses: Jason2866/copy_file_to_another_repo_action@main
env:
API_TOKEN_GITHUB: ${{ secrets.API_TOKEN_GITHUB }}
with:
source_file: 'trigger.txt'
destination_repo: 'arendst/tmp_copy'
destination_branch: 'action-development'
user_email: 'github-actions@github.com'
user_name: 'github-actions'


@ -1,170 +0,0 @@
name: Build_firmware_master
on:
push:
branches: master
paths-ignore:
- '.github/**' # Ignore changes towards the .github directory
- '**.md' # Do not build if *.md files change
# Ensures that only one deploy task per branch/environment will run at a time.
concurrency:
group: environment-${{ github.ref }}
cancel-in-progress: true
jobs:
base-images:
runs-on: ubuntu-latest
if: github.repository == 'arendst/Tasmota'
continue-on-error: true
strategy:
matrix:
variant:
- tasmota
- tasmota4M
- tasmota-minimal
- tasmota-display
- tasmota-ir
- tasmota-knx
- tasmota-lite
- tasmota-sensors
- tasmota-zbbridge
- tasmota-zigbee
- tasmota32
- tasmota32-webcam
- tasmota32-bluetooth
- tasmota32-display
- tasmota32-ir
- tasmota32-lvgl
- tasmota32c3
- tasmota32c3usb
- tasmota32solo1
- tasmota32solo1-safeboot
- tasmota32-safeboot
- tasmota32c3-safeboot
- tasmota32s2-safeboot
- tasmota32s3-safeboot
- tasmota32c3usb-safeboot
steps:
- uses: actions/checkout@v3
with:
ref: master
- name: Set up Python
uses: actions/setup-python@v2
- name: Install dependencies
run: |
pip install -U platformio
- name: Run PlatformIO
run: platformio run -e ${{ matrix.variant }}
- uses: actions/upload-artifact@v2
with:
name: firmware
path: ./build_output
language-images:
runs-on: ubuntu-latest
if: github.repository == 'arendst/Tasmota'
continue-on-error: true
strategy:
matrix:
variant: [ tasmota, tasmota32 ]
language: [ AF, BG, BR, CN, CZ, DE, ES, FR, FY, GR, HE, HU, IT, KO, NL, PL, PT, RO, RU, SE, SK, TR, TW, UK, VN ]
steps:
- uses: actions/checkout@v3
with:
ref: master
- name: Set up Python
uses: actions/setup-python@v2
- name: Install dependencies
run: |
pip install -U platformio
- name: Run PlatformIO
run: platformio run -e ${{ matrix.variant }}-${{ matrix.language }}
- uses: actions/upload-artifact@v2
with:
name: firmware
path: ./build_output
Upload:
needs: [base-images, language-images]
runs-on: ubuntu-latest
continue-on-error: true
steps:
- uses: actions/checkout@v3
- uses: actions/download-artifact@v2
with:
name: firmware
path: ./mv_firmware
- name: Display structure of downloaded files
run: ls -R ./mv_firmware/
- name: Release
uses: softprops/action-gh-release@v1
#if: startsWith(github.ref, 'refs/tags/')
with:
tag_name: ${{ github.run_number }}
files: ./mv_firmware/firmware/*
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Move firmware files in sub-folders
run: |
mkdir -p ./release-firmware/tasmota/languages
mkdir -p ./release-firmware/tasmota32/languages
mkdir -p ./release-firmware/map
[ ! -f ./mv_firmware/map/* ] || mv ./mv_firmware/map/* ./release-firmware/map/
[ ! -f ./mv_firmware/firmware/tasmota.* ] || mv ./mv_firmware/firmware/tasmota.* ./release-firmware/tasmota/
[ ! -f ./mv_firmware/firmware/tasmota4M.* ] || mv ./mv_firmware/firmware/tasmota4M.* ./release-firmware/tasmota/
[ ! -f ./mv_firmware/firmware/tasmota-sensors.* ] || mv ./mv_firmware/firmware/tasmota-sensors.* ./release-firmware/tasmota/
[ ! -f ./mv_firmware/firmware/tasmota-minimal.bin.gz ] || mv ./mv_firmware/firmware/tasmota-minimal.bin.gz ./release-firmware/tasmota/
[ ! -f ./mv_firmware/firmware/tasmota-lite.* ] || mv ./mv_firmware/firmware/tasmota-lite.* ./release-firmware/tasmota/
[ ! -f ./mv_firmware/firmware/tasmota-ir*.* ] || mv ./mv_firmware/firmware/tasmota-ir*.* ./release-firmware/tasmota/
[ ! -f ./mv_firmware/firmware/tasmota-display.* ] || mv ./mv_firmware/firmware/tasmota-display.* ./release-firmware/tasmota/
[ ! -f ./mv_firmware/firmware/tasmota-knx.* ] || mv ./mv_firmware/firmware/tasmota-knx.* ./release-firmware/tasmota/
[ ! -f ./mv_firmware/firmware/tasmota-zbbridge.* ] || mv ./mv_firmware/firmware/tasmota-zbbridge.* ./release-firmware/tasmota/
[ ! -f ./mv_firmware/firmware/tasmota-zigbee.* ] || mv ./mv_firmware/firmware/tasmota-zigbee.* ./release-firmware/tasmota/
[ ! -f ./mv_firmware/firmware/tasmota32.* ] || mv ./mv_firmware/firmware/tasmota32.* ./release-firmware/tasmota32/
[ ! -f ./mv_firmware/firmware/tasmota32solo1*.* ] || mv ./mv_firmware/firmware/tasmota32solo1*.* ./release-firmware/tasmota32/
[ ! -f ./mv_firmware/firmware/tasmota32-ir*.* ] || mv ./mv_firmware/firmware/tasmota32-ir*.* ./release-firmware/tasmota32/
[ ! -f ./mv_firmware/firmware/tasmota32-display.* ] || mv ./mv_firmware/firmware/tasmota32-display.* ./release-firmware/tasmota32/
[ ! -f ./mv_firmware/firmware/tasmota32-lvgl.* ] || mv ./mv_firmware/firmware/tasmota32-lvgl.* ./release-firmware/tasmota32/
[ ! -f ./mv_firmware/firmware/tasmota32-web*.* ] || mv ./mv_firmware/firmware/tasmota32-web*.* ./release-firmware/tasmota32/
[ ! -f ./mv_firmware/firmware/tasmota32-odroidgo.* ] || mv ./mv_firmware/firmware/tasmota32-odroidgo.* ./release-firmware/tasmota32/
[ ! -f ./mv_firmware/firmware/tasmota32-core2.* ] || mv ./mv_firmware/firmware/tasmota32-core2.* ./release-firmware/tasmota32/
[ ! -f ./mv_firmware/firmware/tasmota32-bluetooth.* ] || mv ./mv_firmware/firmware/tasmota32-bluetooth.* ./release-firmware/tasmota32/
[ ! -f ./mv_firmware/firmware/tasmota32c3*.* ] || mv ./mv_firmware/firmware/tasmota32c3*.* ./release-firmware/tasmota32/
[ ! -f ./mv_firmware/firmware/tasmota32-safeboot* ] || mv ./mv_firmware/firmware/tasmota32-safeboot* ./release-firmware/tasmota32/
[ ! -f ./mv_firmware/firmware/tasmota32-* ] || mv ./mv_firmware/firmware/tasmota32-* ./release-firmware/tasmota32/languages/
[ ! -f ./mv_firmware/firmware/tasmota32* ] || mv ./mv_firmware/firmware/tasmota32* ./release-firmware/tasmota32/languages/
[ ! -f ./mv_firmware/firmware/* ] || mv ./mv_firmware/firmware/* ./release-firmware/tasmota/languages/
- name: Display files to transfer
run: ls -R ./*
- name: Push Firmware files to tmp_copy repo
uses: Jason2866/copy_file_to_another_repo_action@main
env:
API_TOKEN_GITHUB: ${{ secrets.API_TOKEN_GITHUB }}
with:
source_file: 'release-firmware'
destination_repo: 'arendst/tmp_copy'
destination_branch: 'firmware'
user_email: 'github-actions@github.com'
user_name: 'github-actions'
Start_final_copy:
needs: Upload
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Create trigger.txt
run: |
echo ${GITHUB_SHA} &> trigger.txt
echo "$(<trigger.txt)"
- name: Push trigger.txt to start workflow copy in tmp repo
uses: Jason2866/copy_file_to_another_repo_action@main
env:
API_TOKEN_GITHUB: ${{ secrets.API_TOKEN_GITHUB }}
with:
source_file: 'trigger.txt'
destination_repo: 'arendst/tmp_copy'
destination_branch: 'action-master'
user_email: 'github-actions@github.com'
user_name: 'github-actions'

17
.gitignore vendored

@ -3,6 +3,23 @@
**/__pycache__/**
**/*.pyc
*.cfg
*.log
EyeTrackApp/venv
EyeTrackApp/eyetrack_settings.json
EyeTrackApp/eyetrack_settings.backup
EyeTrackApp/build
EyeTrackApp/dist
build
dist
EyeTrackApp/IBO_LEFT.png
EyeTrackApp/IBO_RIGHT.png
EyeTrackApp/IBO_LEFT.png
EyeTrackApp/IBO_RIGHT.png
/EBPD_LEFT.png
/EyeTrackApp/EBPD_RIGHT.png
/EBPD_RIGHT.png
/IBO_RIGHT.png
/IBO_LEFT.png
/eyetrack_settings.backup
/eyetrack_settings.json

189
.releaserc Normal file

@ -0,0 +1,189 @@
{
"branches": [
"main",
"master",
"release",
{
"name": "HSF-and-new-algos-feature-branch",
"prerelease": true
}
],
"plugins": [
[
"@semantic-release/commit-analyzer",
{
"preset": "conventionalcommits",
"parserOpts": {
"noteKeywords": [
"BREAKING CHANGE",
"BREAKING CHANGES",
"BREAKING"
]
},
"releaseRules": [
{
"breaking": true,
"release": "major"
},
{
"type": "feat",
"release": "minor"
},
{
"type": "fix",
"release": "patch"
},
{
"type": "perf",
"release": "patch"
},
{
"type": "revert",
"release": "patch"
},
{
"type": "docs",
"scope": "docs-*",
"release": "minor"
},
{
"type": "docs",
"release": false
},
{
"type": "style",
"release": "patch"
},
{
"type": "refactor",
"release": "patch"
},
{
"type": "test",
"release": "patch"
},
{
"type": "build",
"release": "patch"
},
{
"type": "ci",
"scope": "ci-*",
"release": "patch"
},
{
"type": "chore",
"release": false
},
{
"type": "no-release",
"release": false
}
]
}
],
[
"@semantic-release/release-notes-generator",
{
"preset": "conventionalcommits",
"parserOpts": {
"noteKeywords": [
"BREAKING CHANGE",
"BREAKING CHANGES",
"BREAKING"
]
},
"writerOpts": {
"commitsSort": [
"subject",
"scope"
]
},
"presetConfig": {
"types": [
{
"type": "feat",
"section": "🍕 Features"
},
{
"type": "feature",
"section": "🍕 Features"
},
{
"type": "fix",
"section": "🐛 Bug Fixes"
},
{
"type": "perf",
"section": "🔥 Performance Improvements"
},
{
"type": "revert",
"section": "⏩ Reverts"
},
{
"type": "docs",
"section": "📝 Documentation"
},
{
"type": "style",
"section": "🎨 Styles"
},
{
"type": "refactor",
"section": "🧑‍💻 Code Refactoring"
},
{
"type": "test",
"section": "✅ Tests"
},
{
"type": "build",
"section": "🤖 Build System"
},
{
"type": "ci",
"section": "🔁 Continuous Integration"
}
]
}
}
],
[
"@semantic-release/changelog",
{
"changelogTitle": "# 📦 Changelog \n[![conventional commits](https://img.shields.io/badge/conventional%20commits-1.0.0-yellow.svg)](https://conventionalcommits.org)\n[![semantic versioning](https://img.shields.io/badge/semantic%20versioning-2.0.0-green.svg)](https://semver.org)\n> All notable changes to this project will be documented in this file"
}
],
[
"@semantic-release/exec",
{
"prepareCmd": "./.github/scripts/prepareCMD.sh ${nextRelease.version}",
"publishCmd": "echo Publishing ${nextRelease.version}"
}
],
[
"@semantic-release/github",
{
"addReleases": "bottom",
"assets": [
{
"path": "./dist/**/*.{msi,deb,rpm,AppImage,dmg,sha256sum,bin,exe}"
}
],
"successComment": false,
"failTitle": false
}
],
[
"@semantic-release/git",
{
"assets": [
"LICENSE*",
"CHANGELOG.md"
],
"message": "chore(${nextRelease.type}): ${nextRelease.version} [skip ci]\n\n${nextRelease.notes}"
}
]
]
}
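To make the releaseRules above concrete, a small Python sketch of the type-to-bump mapping (illustrative only; the real decision is made by @semantic-release/commit-analyzer, and the scoped exceptions are noted in comments):

# Hypothetical mirror of the releaseRules block above.
RELEASE_RULES = {
    "feat": "minor", "fix": "patch", "perf": "patch", "revert": "patch",
    "style": "patch", "refactor": "patch", "test": "patch", "build": "patch",
    "docs": None, "chore": None, "no-release": None,
    # exceptions in the real config: docs with a docs-* scope -> minor,
    # ci with a ci-* scope -> patch
}

def bump_for(commit_type: str, breaking: bool = False):
    if breaking:  # BREAKING CHANGE notes always force a major release
        return "major"
    return RELEASE_RULES.get(commit_type)

assert bump_for("feat") == "minor"
assert bump_for("fix") == "patch"
assert bump_for("chore") is None
assert bump_for("refactor", breaking=True) == "major"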

347
CHANGELOG.md Normal file

@ -0,0 +1,347 @@
# 📦 Changelog
[![conventional commits](https://img.shields.io/badge/conventional%20commits-1.0.0-yellow.svg)](https://conventionalcommits.org)
[![semantic versioning](https://img.shields.io/badge/semantic%20versioning-2.0.0-green.svg)](https://semver.org)
> All notable changes to this project will be documented in this file
## [1.0.0-HSF-and-new-algos-feature-branch.11](https://github.com/EyeTrackVR/EyeTrackVR/compare/v1.0.0-HSF-and-new-algos-feature-branch.10...v1.0.0-HSF-and-new-algos-feature-branch.11) (2023-10-08)
### 🍕 Features
* Merge pull request [#94](https://github.com/EyeTrackVR/EyeTrackVR/issues/94) from DeltaNeverUsed/HSF-and-new-algos-feature-branch ([e1203aa](https://github.com/EyeTrackVR/EyeTrackVR/commit/e1203aa9a73b62684aa17e04fa424e68b2ddcb3c))
## [1.0.0-HSF-and-new-algos-feature-branch.10](https://github.com/EyeTrackVR/EyeTrackVR/compare/v1.0.0-HSF-and-new-algos-feature-branch.9...v1.0.0-HSF-and-new-algos-feature-branch.10) (2023-10-05)
### 🍕 Features
* pupil dilation initial imp ([6200ddf](https://github.com/EyeTrackVR/EyeTrackVR/commit/6200ddfcab2297e59c689edfa72ddb47fbd8faa1))
* pupil dilation work start ([e694054](https://github.com/EyeTrackVR/EyeTrackVR/commit/e694054cb43d72977cdae746576bf45f02c4fc14))
## [1.0.0-HSF-and-new-algos-feature-branch.9](https://github.com/EyeTrackVR/EyeTrackVR/compare/v1.0.0-HSF-and-new-algos-feature-branch.8...v1.0.0-HSF-and-new-algos-feature-branch.9) (2023-10-03)
### 🍕 Features
* stage one of new output formats ([8e28092](https://github.com/EyeTrackVR/EyeTrackVR/commit/8e2809260180f7e091ceba97c19eb52f883a3b91))
### 🐛 Bug Fixes
* temp disable leap low priority for MetalCanyon ([9bab5b4](https://github.com/EyeTrackVR/EyeTrackVR/commit/9bab5b45551f4744e79ee9a715221b4f2719e08f))
## [1.0.0-HSF-and-new-algos-feature-branch.8](https://github.com/EyeTrackVR/EyeTrackVR/compare/v1.0.0-HSF-and-new-algos-feature-branch.7...v1.0.0-HSF-and-new-algos-feature-branch.8) (2023-10-01)
### 🐛 Bug Fixes
* LEAP Eyelid, tune RANSAC BLINK ([5a6248b](https://github.com/EyeTrackVR/EyeTrackVR/commit/5a6248b897cf37a857148c099bd3336f3dc7f5cd))
* remove prints ([46427e3](https://github.com/EyeTrackVR/EyeTrackVR/commit/46427e35330e7a15501ecc9fe668a30572a82832))
## [1.0.0-HSF-and-new-algos-feature-branch.7](https://github.com/EyeTrackVR/EyeTrackVR/compare/v1.0.0-HSF-and-new-algos-feature-branch.6...v1.0.0-HSF-and-new-algos-feature-branch.7) (2023-09-28)
### 🐛 Bug Fixes
* left eye ransac thresh set to right ([c6fbfe8](https://github.com/EyeTrackVR/EyeTrackVR/commit/c6fbfe8c70b5f75de7060bc3a62b3b8aedbe34f3))
* left eye ransac thresh set to right ([d11f94f](https://github.com/EyeTrackVR/EyeTrackVR/commit/d11f94f5afe90c0ffb2a9f9eb8a76bc986c8500d))
## [1.0.0-HSF-and-new-algos-feature-branch.6](https://github.com/EyeTrackVR/EyeTrackVR/compare/v1.0.0-HSF-and-new-algos-feature-branch.5...v1.0.0-HSF-and-new-algos-feature-branch.6) (2023-09-27)
### 🐛 Bug Fixes
* remove y axis lock (temp) ([2ec7d2d](https://github.com/EyeTrackVR/EyeTrackVR/commit/2ec7d2d4eeed06a7bee0aad31497f55d70749751))
* tighten blink vec (needs testing/re-write, just minor fix) ([1c2760b](https://github.com/EyeTrackVR/EyeTrackVR/commit/1c2760b382efd52d368a18434f0684ba0383db3e))
## [1.0.0-HSF-and-new-algos-feature-branch.5](https://github.com/EyeTrackVR/EyeTrackVR/compare/v1.0.0-HSF-and-new-algos-feature-branch.4...v1.0.0-HSF-and-new-algos-feature-branch.5) (2023-09-26)
### 🍕 Features
* LEAP Lid blink algo ([ab631fa](https://github.com/EyeTrackVR/EyeTrackVR/commit/ab631fad198c23528cfee2376fa7f5af0d0b5818))
### 🐛 Bug Fixes
* small DADDY optimizations (test for FPS limit) ([6297ce3](https://github.com/EyeTrackVR/EyeTrackVR/commit/6297ce327cbd0efd286fdc03ec1dfe7507a437ae))
## [1.0.0-HSF-and-new-algos-feature-branch.4](https://github.com/EyeTrackVR/EyeTrackVR/compare/v1.0.0-HSF-and-new-algos-feature-branch.3...v1.0.0-HSF-and-new-algos-feature-branch.4) (2023-09-24)
### 🐛 Bug Fixes
* LEAP full closed thresh ([4ad1eaf](https://github.com/EyeTrackVR/EyeTrackVR/commit/4ad1eaffc4f2f3600d2442f0a5c3939cf6381258))
## [1.0.0-HSF-and-new-algos-feature-branch.3](https://github.com/EyeTrackVR/EyeTrackVR/compare/v1.0.0-HSF-and-new-algos-feature-branch.2...v1.0.0-HSF-and-new-algos-feature-branch.3) (2023-09-24)
### 🐛 Bug Fixes
* UVC cameras not working ([5e28ee3](https://github.com/EyeTrackVR/EyeTrackVR/commit/5e28ee34e58719d30f41b9a70fe94d3f8995dcac))
* UVC cameras not working2 ([5209a44](https://github.com/EyeTrackVR/EyeTrackVR/commit/5209a441b6cc603db00c463c1704f94172baa3a2))
## [1.0.0-HSF-and-new-algos-feature-branch.2](https://github.com/EyeTrackVR/EyeTrackVR/compare/v1.0.0-HSF-and-new-algos-feature-branch.1...v1.0.0-HSF-and-new-algos-feature-branch.2) (2023-09-23)
### 🍕 Features
* ready for new beta ([ac6f8b9](https://github.com/EyeTrackVR/EyeTrackVR/commit/ac6f8b98d2aeb48a5a91b8a361d4836022f18678))
### 🐛 Bug Fixes
* BETA 7 ([0cd9f46](https://github.com/EyeTrackVR/EyeTrackVR/commit/0cd9f46a02a18457fa4aa15d53fb391e50c23540))
* Merge pull request [#91](https://github.com/EyeTrackVR/EyeTrackVR/issues/91) from ShyAssassin/ci-cd-fix ([2a7dd65](https://github.com/EyeTrackVR/EyeTrackVR/commit/2a7dd65e08b7b9ef949cec8a8eeb7b1dc1e150a7))
### 🧑‍💻 Code Refactoring
* BETA 7 ([1d79445](https://github.com/EyeTrackVR/EyeTrackVR/commit/1d79445889f6ae4e9f8301356319e3ca78031b44))
### 🤖 Build System
* BETA 7 ([1e94577](https://github.com/EyeTrackVR/EyeTrackVR/commit/1e94577d900f41f2cb54b24568907f2aa1059fe1))
* fixed github action to early exit ([9562d4d](https://github.com/EyeTrackVR/EyeTrackVR/commit/9562d4de6fcf74f1a218f1f89bb111c53e18c431))
## 1.0.0-HSF-and-new-algos-feature-branch.1 (2023-08-24)
### ⚠ BREAKING CHANGES
* CHANGES
### 🍕 Features
* add colorama instead of using escape codes ([1d9dfea](https://github.com/EyeTrackVR/EyeTrackVR/commit/1d9dfeae19cd3ab8fd8252cb466b47576428c1e5))
* add dev container ([a5e36ad](https://github.com/EyeTrackVR/EyeTrackVR/commit/a5e36ad4c468f25901ff473555e51ff1495d9521))
* add taskipy to run tasks via poetry ([2f1d3d9](https://github.com/EyeTrackVR/EyeTrackVR/commit/2f1d3d9275310db87e4bda041c072dd9572eb07e))
* add v2 serial comms with packet headers ([c2aad0e](https://github.com/EyeTrackVR/EyeTrackVR/commit/c2aad0e8590a5533f250c9d87d45e42828930611))
* more readable logging with colorama ([2efa3c3](https://github.com/EyeTrackVR/EyeTrackVR/commit/2efa3c35896d1b10e037afa8c12fc9b780c977be))
* show bitrate, fps, latency in tracking mode ([6163744](https://github.com/EyeTrackVR/EyeTrackVR/commit/61637442c61a8c82e2b38c491d3a409080ccf2a9))
* slightly reduce dropped frame count ([3b5582d](https://github.com/EyeTrackVR/EyeTrackVR/commit/3b5582de6eab3b3e65eb7d76c631cd089a06ee40))
* updated poetry.lock ([632ec2a](https://github.com/EyeTrackVR/EyeTrackVR/commit/632ec2a8384db57e7a70099d26accbe3f97db774))
### 🐛 Bug Fixes
* bring the existing impl to a usable state ([b3f444e](https://github.com/EyeTrackVR/EyeTrackVR/commit/b3f444ee3a066b0cd84e7c9f61c04aa7542fb0ef))
* ensure serial is closed when thread crashed ([4def965](https://github.com/EyeTrackVR/EyeTrackVR/commit/4def96575969e3f22a4a915c9e5e71576403c2c8))
* identify and mitigate latency issues ([295f104](https://github.com/EyeTrackVR/EyeTrackVR/commit/295f10476d2c4ecbe266fbb1544091e58f16e563))
* module leap not found ([437b88a](https://github.com/EyeTrackVR/EyeTrackVR/commit/437b88a4bd071f78d0ff861e70c2e5a64a1f8218))
* module leap not found ([a29db80](https://github.com/EyeTrackVR/EyeTrackVR/commit/a29db809099a5122430fba58bb8f5e2f288bd7fb))
* Only ever return one blob when blob tracking ([5a451b4](https://github.com/EyeTrackVR/EyeTrackVR/commit/5a451b4aa5b18dee1a1ce961fec04d653e2a77dd)), closes [#20](https://github.com/EyeTrackVR/EyeTrackVR/issues/20)
* Output all info to OSC unless we're failing to find any data ([764baa5](https://github.com/EyeTrackVR/EyeTrackVR/commit/764baa539dafa0382d95e53730e452cfc48710ec)), closes [#21](https://github.com/EyeTrackVR/EyeTrackVR/issues/21)
* pinv for pseudo inverse when singular matrix ([ca56222](https://github.com/EyeTrackVR/EyeTrackVR/commit/ca5622244528e60ad40e2cc994a80b39afe4bff0))
* pyserial is the correct dep, not serial ([09a9fbb](https://github.com/EyeTrackVR/EyeTrackVR/commit/09a9fbb052483cb96c7283cdbcc44bc57a8e078e))
* rate limit error ([129e07b](https://github.com/EyeTrackVR/EyeTrackVR/commit/129e07bc1c63e632c4c41b2a3d6ae7d6d858ccaf))
* standalone exe ([35e71a2](https://github.com/EyeTrackVR/EyeTrackVR/commit/35e71a212323ec539283a267079e032cb6891c34))
* Update graph background color on events ([5580d48](https://github.com/EyeTrackVR/EyeTrackVR/commit/5580d482f935c36a157053b1ad2a1d7234e6160b)), closes [#24](https://github.com/EyeTrackVR/EyeTrackVR/issues/24)
* when address is set but no devices connected ([6d7630c](https://github.com/EyeTrackVR/EyeTrackVR/commit/6d7630cf6029e2de78aa41bdd468ed4d59898951))
### 🧑‍💻 Code Refactoring
* cleanup imports ([446a20c](https://github.com/EyeTrackVR/EyeTrackVR/commit/446a20cea822189f1f5adf3d10b9d9e252b5186e))
* edit readme and dummy commit to trigger actions ([f5695f0](https://github.com/EyeTrackVR/EyeTrackVR/commit/f5695f00a80567d025c3d7ab2689b08b736b25a3))
* extract EyeInfo enums and dataclasses ([d6d1938](https://github.com/EyeTrackVR/EyeTrackVR/commit/d6d19383bd0dba3e8af39d2101658eb2dbbaf847))
### 🤖 Build System
* setup pre-release build pipeline ([aa5e6b8](https://github.com/EyeTrackVR/EyeTrackVR/commit/aa5e6b8b34dd13357ed3b4bd7989c0e6b6b29991))
* setup pre-release build pipeline ([a8b73bf](https://github.com/EyeTrackVR/EyeTrackVR/commit/a8b73bf2240aac6b7900ec409fa993bd2a692299))
* setup pre-release build pipeline ([9b27720](https://github.com/EyeTrackVR/EyeTrackVR/commit/9b27720bf9385ca7331895ae625c0b28e380c8b2))
* setup pre-release build pipeline ([e250e4e](https://github.com/EyeTrackVR/EyeTrackVR/commit/e250e4e3539dcf83cdc7ddee24a07b0303ee843e))

28
Dockerfile Normal file

@ -0,0 +1,28 @@
# For more information, please refer to https://aka.ms/vscode-docker-python
FROM python:3.10-slim
EXPOSE 8000
# Keeps Python from generating .pyc files in the container
ENV PYTHONDONTWRITEBYTECODE=1
# Turns off buffering for easier container logging
ENV PYTHONUNBUFFERED=1
WORKDIR /app
COPY . /app
# Install pip requirements
COPY pyproject.toml .
RUN apt-get update &&\
    apt-get install -y ffmpeg libsm6 libxext6 &&\
    apt-get install -y libgl1-mesa-glx &&\
    apt-get install -y --no-install-recommends build-essential &&\
    apt-get install -y make &&\
    apt-get install -y --no-install-recommends python3-pip &&\
    pip3 install poetry
# Creates a non-root user with an explicit UID and adds permission to access the /app folder
# For more info, please refer to https://aka.ms/vscode-docker-python-configure-containers
RUN adduser -u 5678 --disabled-password --gecos "" appuser && chown -R appuser /app
USER appuser

704
EyeTrackApp/AHSF.py Normal file

@ -0,0 +1,704 @@
"""
------------------------------------------------------------------------------------------------------
,@@@@@@
@@@@@@@@@@@ @@@
@@@@@@@@@@@@ @@@@@@@@@@@
@@@@@@@@@@@@@ @@@@@@@@@@@@@@
@@@@@@@/ ,@@@@@@@@@@@@@
/@@@@@@@@@@@@@@@ @@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@ @@@@@
@@@@@@@@ @@@@@
,@@@ @@@@&
@@@@@@. @@@@
@@@ @@@@@@@@@/ @@@@@
,@@@. @@@@@@((@ @@@@(
//@@@ ,, @@@@ @@@@@
@@@( @@@@@@@
@@@ @ @@@@@@@@#
@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@(
Adaptive Haar Surround Feature: Summer, PallasNeko (Optimization)
Algorithm App Implementations and Tweaks By: Prohurtz
Copyright (c) 2025 EyeTrackVR <3
LICENSE: Summer Software Distribution License 1.0
------------------------------------------------------------------------------------------------------
"""
import functools
import math
import os
import sys
import time
import timeit
from logging import FileHandler, Formatter, INFO, StreamHandler, getLogger
from functools import lru_cache
import cv2
import numpy as np
# from line_profiler_pycharm import profile
class AHSF:
def __init__(self, video_src, save_logfile=False, imshow_enable=False, save_video=False):
self.this_file_basename = os.path.basename(__file__)
self.this_file_name = self.this_file_basename.replace(".py", "")
self.alg_ver = "PallasNekoV3"
self.save_logfile = save_logfile
self.imshow_enable = imshow_enable
self.save_video = save_video
self.VideoCapture_SRC = video_src
self.input_is_webcam = False
        self.benchmark_flag = not self.input_is_webcam and not self.imshow_enable and not self.save_video
self.loop_num = 1 if self.imshow_enable or self.save_video else 10
self.output_video_path = f"./{self.this_file_name}.mp4"
self.logfilename = f"./{self.this_file_name}.log"
self.print_enable = False
self.lru_maxsize_vvs = 16
self.lru_maxsize_vs = 64
self.lru_maxsize_s = 128
self.logger = getLogger(__name__)
self.logger.setLevel(INFO)
formatter = Formatter("%(message)s")
handler = StreamHandler()
handler.setLevel(INFO)
handler.setFormatter(formatter)
self.logger.addHandler(handler)
if self.save_logfile:
handler = FileHandler(self.logfilename, encoding="utf8", mode="w")
handler.setLevel(INFO)
handler.setFormatter(formatter)
self.logger.addHandler(handler)
else:
self.save_logfile = False
self.video_wr = cv2.VideoWriter if self.save_video else None
def format_time(self, timespan, precision=3):
"""
https://github.com/ipython/ipython/blob/339c0d510a1f3cb2158dd8c6e7f4ac89aa4c89d8/IPython/core/magics/execution.py#L1473
Formats the timespan in a human readable form
"""
if timespan >= 60.0:
# we have more than a minute, format that in a human readable form
# Idea from http://snipplr.com/view/5713/
parts = [("d", 60 * 60 * 24), ("h", 60 * 60), ("min", 60), ("s", 1)]
time = []
leftover = timespan
for suffix, length in parts:
value = int(leftover / length)
if value > 0:
leftover = leftover % length
time.append("%s%s" % (str(value), suffix))
if leftover < 1:
break
return " ".join(time)
# Unfortunately the unicode 'micro' symbol can cause problems in
# certain terminals.
# See bug: https://bugs.launchpad.net/ipython/+bug/348466
# Try to prevent crashes by being more secure than it needs to
# E.g. eclipse is able to print a µ, but has no sys.stdout.encoding set.
units = ["s", "ms", "us", "ns"] # the save value
if hasattr(sys.stdout, "encoding") and sys.stdout.encoding:
try:
"\xb5".encode(sys.stdout.encoding)
units = ["s", "ms", "\xb5s", "ns"]
            except Exception:
                pass  # keep the ASCII "us" fallback
scaling = [1, 1e3, 1e6, 1e9]
if timespan > 0.0:
order = min(-int(math.floor(math.log10(timespan)) // 3), 3)
else:
order = 3
return "%.*g %s" % (precision, timespan * scaling[order], units[order])
def filter_light(self, img_gray, img_blur, tau):
for i in range(img_gray.shape[1]):
for j in range(img_gray.shape[0]):
if img_gray[j, i] > tau:
img_blur[j, i] = tau
else:
img_blur[j, i] = img_gray[j, i]
return img_blur
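    # Note: the double loop in filter_light is a per-pixel clamp at tau; an
    # equivalent vectorized form (an illustrative assumption, not part of the
    # original file) would be: np.minimum(img_gray, tau, out=img_blur)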
def pupil_detector_haar(self, img_gray, params):
frame_num = 0
img_down = cv2.resize(
img_gray,
(
img_gray.shape[1] // params["ratio_downsample"],
img_gray.shape[0] // params["ratio_downsample"],
),
)
img_boundary = (0, 0, img_down.shape[1], img_down.shape[0])
if params["use_init_rect"]:
tau = max(params["mu_outer"], params["mu_inner"] + 30)
self.filter_light(img_down, img_down, tau)
# Coarse Detection
(
pupil_rect_coarse,
outer_rect_coarse,
max_response_coarse,
mu_inner,
mu_outer,
) = self.coarse_detection(img_down, params)
print(
"Coarse Detection: ",
pupil_rect_coarse,
outer_rect_coarse,
max_response_coarse,
mu_inner,
mu_outer,
)
if params["use_init_rect"] and frame_num == 0:
mu_inner0 = mu_inner
mu_outer0 = mu_outer
kf = 2 - 0.01 * mu_inner0
img_coarse = cv2.cvtColor(img_down, cv2.COLOR_GRAY2BGR)
# show image
# Fine Detection
if mu_outer - mu_inner >= 5:
pupil_rect_fine, _ = self.fine_detection(img_down, pupil_rect_coarse)
else:
pupil_rect_fine = pupil_rect_coarse
# Postprocessing
pupil_rect_coarse = self.rect_scale(pupil_rect_coarse, params["ratio_downsample"], False)
outer_rect_coarse = self.rect_scale(outer_rect_coarse, params["ratio_downsample"], False)
pupil_rect_fine = self.rect_scale(pupil_rect_fine, params["ratio_downsample"], False)
center_coarse = (
pupil_rect_coarse[0] + pupil_rect_coarse[2] // 2,
pupil_rect_coarse[1] + pupil_rect_coarse[3] // 2,
)
center_fine = (
pupil_rect_fine[0] + pupil_rect_fine[2] // 2,
pupil_rect_fine[1] + pupil_rect_fine[3] // 2,
)
return (
pupil_rect_coarse,
outer_rect_coarse,
pupil_rect_fine,
center_coarse,
center_fine,
)
# @lru_cache(maxsize=self.lru_maxsize_vvs)
def get_empty_array(self, frame_shape, width_min, width_max, wh_step, xy_step, roi, ratio_outer):
frame_int_dtype = np.intc
np_index_dtype = np.intc  # memo: Better to use np.intp, but a little slower ref: https://numpy.org/doc/1.25/user/basics.indexing.html#detailed-notes
row, col = frame_shape
frame_int = np.empty((row + 1, col + 1), dtype=frame_int_dtype)
w_arr = np.arange(width_min, width_max + 1, wh_step, dtype=np_index_dtype)
h_arr = (w_arr / ratio_outer).astype(np.int16)
# memo: It is not smart code and needs to be changed.
y_out_n = np.hstack([np.arange(roi[1] + h, roi[3] - h, xy_step, dtype=np_index_dtype) for h in h_arr])
x_out_n = np.hstack([np.arange(roi[0] + w, roi[2] - w, xy_step, dtype=np_index_dtype) for w in w_arr])
y_out_h = np.hstack([np.arange(roi[1] + h, roi[3] - h, xy_step, dtype=np_index_dtype) + h for h in h_arr])
x_out_w = np.hstack([np.arange(roi[0] + w, roi[2] - w, xy_step, dtype=np_index_dtype) + w for w in w_arr])
out_h = y_out_h - y_out_n
out_w = x_out_w - x_out_n
y_in_n = np.hstack([np.arange(roi[1] + h, roi[3] - h, xy_step, dtype=np_index_dtype) + int(h / 4) for h in h_arr])
x_in_n = np.hstack([np.arange(roi[0] + w, roi[2] - w, xy_step, dtype=np_index_dtype) + int(w / 4) for w in w_arr])
y_in_h = np.hstack(
[np.arange(roi[1] + h, roi[3] - h, xy_step, dtype=np_index_dtype) + int(h / 4) + int(h / 2) for h in h_arr]
)
x_in_w = np.hstack(
[np.arange(roi[0] + w, roi[2] - w, xy_step, dtype=np_index_dtype) + int(w / 4) + int(w / 2) for w in w_arr]
)
in_h = y_in_h - y_in_n
in_w = x_in_w - x_in_n
# # memo: Unelegant code
# # memo: Non-transposed version
# wh_in_arr = np.hstack([np.full(((roi[3] - h) - (roi[1] + h) - 1) // xy_step + 1,int(h/2),dtype=np_index_dtype) for h in h_arr])[:, np.newaxis] * np.hstack([np.full(((roi[2] - w) - (roi[0] + w) - 1) // xy_step + 1,int(w/2),dtype=np_index_dtype) for w in w_arr])[np.newaxis, :]
# wh_out_arr = np.hstack([np.full(((roi[3] - h) - (roi[1] + h) - 1) // xy_step + 1,h,dtype=np_index_dtype) for h in h_arr])[:, np.newaxis] * np.hstack([np.full(((roi[2] - w) - (roi[0] + w) - 1) // xy_step + 1,w,dtype=np_index_dtype) for w in w_arr])[np.newaxis, :]
# memo: Unelegant code
# memo: transposed version
wh_in_arr = (
np.hstack(
[
np.full(
((roi[2] - w) - (roi[0] + w) - 1) // xy_step + 1,
int(w / 2),
dtype=np_index_dtype,
)
for w in w_arr
]
)[:, np.newaxis]
* np.hstack(
[
np.full(
((roi[3] - h) - (roi[1] + h) - 1) // xy_step + 1,
int(h / 2),
dtype=np_index_dtype,
)
for h in h_arr
]
)[np.newaxis, :]
)
wh_out_arr = (
np.hstack(
[
np.full(
((roi[2] - w) - (roi[0] + w) - 1) // xy_step + 1,
w,
dtype=np_index_dtype,
)
for w in w_arr
]
)[:, np.newaxis]
* np.hstack(
[
np.full(
((roi[3] - h) - (roi[1] + h) - 1) // xy_step + 1,
h,
dtype=np_index_dtype,
)
for h in h_arr
]
)[np.newaxis, :]
)
mu_outer_rect = cv2.subtract(
wh_out_arr, wh_in_arr
) # ,dst=) # == (outer_rect[2] * outer_rect[3] - inner_rect[2] * inner_rect[3])
wh_in_arr = 1 / wh_in_arr # .astype(np.float32)
# wh_out_arr=wh_out_arr.astype(np.float64)
mu_outer_rect = 1 / mu_outer_rect # .astype(np.float32)
mu_outer_rect2 = -1.0 * mu_outer_rect # cv2.merge([mu_outer_rect,-1.0*mu_outer_rect])
# 1/wh_in_arr == wh_in_arr_mul
return (
frame_int,
y_out_n,
x_out_n,
y_out_h,
x_out_w,
out_h,
out_w,
y_in_n,
x_in_n,
y_in_h,
x_in_w,
in_h,
in_w,
wh_in_arr,
wh_out_arr,
mu_outer_rect,
mu_outer_rect2,
)
# @profile
def coarse_detection(self, img_gray, params):
ratio_outer = params["ratio_outer"]
kf = params["kf"]
width_min = params["width_min"]
width_max = params["width_max"]
wh_step = params["wh_step"]
xy_step = params["xy_step"]
roi = params["roi"]
init_rect_flag = params["init_rect_flag"]
init_rect = params["init_rect"]
mu_inner = params["mu_inner"]
mu_outer = params["mu_outer"]
max_response_coarse = -255
imgboundary = (0, 0, img_gray.shape[1], img_gray.shape[0])
img_blur = np.copy(img_gray)
rectlist = []
response = []
# Assign values to avoid unassigned errors
pupil_rect_coarse = (10, 10, 10, 10)
outer_rect_coarse = (5, 5, 5, 5)
if init_rect_flag:
init_rect_down = self.rect_scale(init_rect, params["ratio_downsample"], False)
init_rect_down = self.intersect_rect(init_rect_down, imgboundary)
img_blur = img_gray[
init_rect_down[1]: init_rect_down[1] + init_rect_down[3],
init_rect_down[0]: init_rect_down[0] + init_rect_down[2],
]
(
frame_int,
y_out_n,
x_out_n,
y_out_h,
x_out_w,
out_h,
out_w,
y_in_n,
x_in_n,
y_in_h,
x_in_w,
in_h,
in_w,
wh_in_arr,
wh_out_arr,
mu_outer_rect,
mu_outer_rect2,
) = self.get_empty_array(img_blur.shape, width_min, width_max, wh_step, xy_step, roi, ratio_outer)
cv2.integral(
img_blur, sum=frame_int, sdepth=cv2.CV_32S
)
out_p_temp = frame_int.take(y_out_n, axis=0, mode="clip")
out_p_temp = cv2.transpose(out_p_temp)
out_p00 = out_p_temp.take(x_out_n, axis=0, mode="clip")
out_p01 = out_p_temp.take(x_out_w, axis=0, mode="clip")
out_p_temp = frame_int.take(y_out_h, axis=0, mode="clip")
out_p_temp = cv2.transpose(out_p_temp)
out_p11 = out_p_temp.take(x_out_w, axis=0, mode="clip")
out_p10 = out_p_temp.take(x_out_n, axis=0, mode="clip")
outer_sum = cv2.add(out_p00, out_p11)
cv2.subtract(outer_sum, out_p01, dst=outer_sum)
cv2.subtract(outer_sum, out_p10, dst=outer_sum)
in_p_temp = frame_int.take(y_in_n, axis=0, mode="clip")
in_p_temp = cv2.transpose(in_p_temp)
in_p00 = in_p_temp.take(x_in_n, axis=0, mode="clip")
in_p01 = in_p_temp.take(x_in_w, axis=0, mode="clip")
in_p_temp = frame_int.take(y_in_h, axis=0, mode="clip")
in_p_temp = cv2.transpose(in_p_temp)
in_p11 = in_p_temp.take(x_in_w, axis=0, mode="clip")
in_p10 = in_p_temp.take(x_in_n, axis=0, mode="clip")
inner_sum = cv2.add(in_p00, in_p11)
cv2.subtract(inner_sum, in_p01, dst=inner_sum)
cv2.subtract(inner_sum, in_p10, dst=inner_sum)
inner_sum_f = inner_sum.astype(np.float64)
outer_sum_f = outer_sum.astype(np.float64)
response_value = np.empty(outer_sum.shape, dtype=np.float64)
inout_rect_sum = mu_outer_rect2.copy()
inout_rect_mul = mu_outer_rect.copy()
cv2.multiply(inner_sum_f, inout_rect_mul, inout_rect_mul)
cv2.multiply(outer_sum_f, inout_rect_sum, inout_rect_sum)
cv2.add(inout_rect_mul, inout_rect_sum, dst=inout_rect_sum)
cv2.multiply(inner_sum_f, wh_in_arr, inner_sum_f, kf)
cv2.add(inout_rect_sum, inner_sum_f, dst=response_value)
min_response, max_response, min_loc, max_loc = cv2.minMaxLoc(response_value)
rec_o = (
x_out_n[min_loc[1]],
y_out_n[min_loc[0]],
out_w[min_loc[1]],
out_h[min_loc[0]],
)
rec_in = (
x_in_n[min_loc[1]],
y_in_n[min_loc[0]],
in_w[min_loc[1]],
in_h[min_loc[0]],
)
max_response_coarse = -min_response
pupil_rect_coarse = rec_in
outer_rect_coarse = rec_o
return pupil_rect_coarse, outer_rect_coarse, max_response_coarse, mu_inner, mu_outer
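    # In effect, the array pipeline above scores every candidate window with
    # response = mean(outer ring) - kf * mean(inner rect), computed from a single
    # integral image; minMaxLoc's *minimum* is taken (and negated) because the
    # terms are accumulated with flipped signs to stay inside cv2's elementwise ops.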
def fine_detection(self, img_gray, pupil_rect_coarse):
boundary = (0, 0, img_gray.shape[1], img_gray.shape[0])
valid_ratio = 1.2
valid_rect = self.intersect_rect(self.rect_scale(pupil_rect_coarse, valid_ratio), boundary)
img_pupil = img_gray[
valid_rect[1] : valid_rect[1] + valid_rect[3],
valid_rect[0] : valid_rect[0] + valid_rect[2],
]
img_pupil_blur = cv2.GaussianBlur(img_pupil, (5, 5), 0, 0)
edges_filter = self.detect_edges(img_pupil_blur)
# fit ellipse to edges
contours, hierarchy = cv2.findContours(edges_filter, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
# sort contours by area
contours = sorted(contours, key=lambda x: cv2.contourArea(x), reverse=True)
# fit ellipse to largest contour
try:
if len(contours) > 0 and len(contours[0]) >= 5:
pupil_contour = contours[0]
pupil_ellipse = cv2.fitEllipse(pupil_contour)
center_fitting = (
int(pupil_ellipse[0][0] + valid_rect[0]),
int(pupil_ellipse[0][1] + valid_rect[1]),
)
pupil_rect_fine = (
int(pupil_ellipse[0][0] - pupil_ellipse[1][0] / 2),
int(pupil_ellipse[0][1] - pupil_ellipse[1][1] / 2),
int(pupil_ellipse[1][0]),
int(pupil_ellipse[1][1]),
)
pupil_rect_fine = (
pupil_rect_fine[0] + valid_rect[0],
pupil_rect_fine[1] + valid_rect[1],
pupil_rect_fine[2],
pupil_rect_fine[3],
)
pupil_rect_fine = self.intersect_rect(pupil_rect_fine, boundary)
pupil_rect_fine = self.rect_scale(pupil_rect_fine, 1 / valid_ratio)
else:
pupil_rect_fine = pupil_rect_coarse
center_fitting = (
int(pupil_rect_fine[0] + pupil_rect_fine[2] / 2),
int(pupil_rect_fine[1] + pupil_rect_fine[3] / 2),
)
        except Exception:
            # Ellipse fitting failed; fall back to the coarse rect and its center.
            pupil_rect_fine = pupil_rect_coarse
            center_fitting = (
                int(pupil_rect_fine[0] + pupil_rect_fine[2] / 2),
                int(pupil_rect_fine[1] + pupil_rect_fine[3] / 2),
            )
        return pupil_rect_fine, center_fitting
def detect_edges(self, img_pupil_blur):
tau1 = 1 - 20.0 / img_pupil_blur.shape[1]
edges = cv2.Canny(img_pupil_blur, 64, 128)
# img_bw = np.zeros_like(img_pupil_blur)
# img_bw[img_pupil_blur > 100] = 255
img_bw = cv2.compare(img_pupil_blur, 100, cv2.CMP_GT)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
img_bw = cv2.dilate(img_bw, kernel)
# edges_filter = edges & (~img_bw)
# or
edges_filter = cv2.bitwise_and(edges, cv2.bitwise_not(img_bw))
return edges_filter
def fit_pupil_ellipse_swirski(self, img_pupil, edges_filter):
contours, hierarchy = cv2.findContours(edges_filter, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
max_contour_area = 0
max_contour = None
#print("contours: ", contours)
for contour in contours:
area = cv2.contourArea(contour)
if area > max_contour_area:
max_contour_area = area
max_contour = contour
if max_contour is None:
return (0, 0, 0, 0), None
ellipse = cv2.fitEllipse(max_contour)
return ellipse
def rect_scale(self, rect, scale, round_up=True):
x, y, width, height = rect
new_width = int(width * scale)
new_height = int(height * scale)
if round_up:
new_width = int(np.ceil(width * scale))
new_height = int(np.ceil(height * scale))
new_x = x + int((width - new_width) / 2)
new_y = y + int((height - new_height) / 2)
return new_x, new_y, new_width, new_height
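    # Example (illustrative): rect_scale((10, 10, 20, 20), 1.2) scales about the
    # rect center and rounds up, giving (8, 8, 24, 24).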
def intersect_rect(self, rect1, rect2):
x1, y1, w1, h1 = rect1
x2, y2, w2, h2 = rect2
x = max(x1, x2)
y = max(y1, y2)
w = min(x1 + w1, x2 + w2) - x
h = min(y1 + h1, y2 + h2) - y
return x, y, w, h
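    # Example (illustrative): intersect_rect((0, 0, 10, 10), (5, 5, 10, 10))
    # -> (5, 5, 5, 5); disjoint rects yield non-positive w/h, which callers
    # must treat as "no overlap" (see rect_suppression below).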
def rect_suppression(self, rectlist, response, rectlist_out, response_out):
for i in range(len(rectlist)):
flag_intersect = False
for j in range(len(rectlist_out)):
tmp = self.intersect_rect(rectlist[i], rectlist_out[j])
if tmp[2] > 0 and tmp[3] > 0:
flag_intersect = True
if response[i] > response_out[j]:
rectlist_out[j] = rectlist[i]
response_out[j] = response[i]
else:
continue
if not flag_intersect:
rectlist_out.append(rectlist[i])
response_out.append(response[i])
return rectlist_out, response_out
def put_number(self, img_bgr, number, position, color):
cv2.putText(
img_bgr,
str(number),
(int(position[0]) + 10, int(position[1]) - 10),
cv2.FONT_HERSHEY_SIMPLEX,
0.5,
color,
1,
cv2.LINE_AA,
)
def External_Run_AHSF(self, frame_gray):
average_color = np.mean(frame_gray)
height, width = frame_gray.shape
max_dimension = max(height, width)
square_background = np.full((max_dimension, max_dimension), average_color, dtype=np.uint8)
x_offset = (max_dimension - width) // 2
y_offset = (max_dimension - height) // 2
square_background[y_offset : y_offset + height, x_offset : x_offset + width] = frame_gray
frame_gray = cv2.resize(square_background, (100, 100))
frame_clear_resize = frame_gray.copy()
params = {
"ratio_downsample": 0.5,
"use_init_rect": False,
"mu_outer": 200,
"mu_inner": 50,
"ratio_outer": 1,
"kf": 1,
"width_min": 25,
"width_max": 50,
"wh_step": 1,
"xy_step": 5,
"roi": (0, 0, frame_gray.shape[1], frame_gray.shape[0]),
"init_rect_flag": False,
"init_rect": (0, 0, frame_gray.shape[1], frame_gray.shape[0]),
}
try:
pupil_rect_coarse, outer_rect_coarse, max_response_coarse, mu_inner, mu_outer = self.coarse_detection(frame_gray, params)
# ellipse_rect, center_fitting = self.fine_detection(frame_gray, pupil_rect_coarse)
except TypeError:
return frame_gray, frame_gray, 0, 0, 0
x_center = outer_rect_coarse[0] + outer_rect_coarse[2] / 2
y_center = outer_rect_coarse[1] + outer_rect_coarse[3] / 2
x, y, width, height = outer_rect_coarse
cv2.circle(frame_gray, (int(x_center), int(y_center)), 2, (255, 255, 255), -1)
thickness = 1
cv2.rectangle(frame_gray, (pupil_rect_coarse[0], pupil_rect_coarse[1]),
(pupil_rect_coarse[0] + pupil_rect_coarse[2], pupil_rect_coarse[1] + pupil_rect_coarse[3]),
(0, 255, 0), 2)
cv2.rectangle(frame_gray, (outer_rect_coarse[0], outer_rect_coarse[1]),
(outer_rect_coarse[0] + outer_rect_coarse[2], outer_rect_coarse[1] + outer_rect_coarse[3]),
(255, 0, 0), 2)
major_diameter = math.sqrt(width**2 + height**2)
minor_diameter = min(width, height)
average_diameter = (major_diameter + minor_diameter) / 2
return frame_gray, frame_clear_resize, x_center, y_center, abs(width - height)
class FPSResult(object):
"""
base https://github.com/ipython/ipython/blob/339c0d510a1f3cb2158dd8c6e7f4ac89aa4c89d8/IPython/core/magics/execution.py#L55
"""
def __init__(self, loops, repeat, best, worst, all_runs, precision):
self.loops = loops
self.repeat = repeat
self.best = 1 / best
self.worst = 1 / worst
self.all_runs = all_runs
self._precision = precision
self.fps = [1 / dt for dt in all_runs]
self.unit = "fps"
@property
def average(self):
return math.fsum(self.fps) / len(self.fps)
@property
def stdev(self):
mean = self.average
return (math.fsum([(x - mean) ** 2 for x in self.fps]) / len(self.fps)) ** 0.5
def __str__(self):
pm = "+-"
if hasattr(sys.stdout, "encoding") and sys.stdout.encoding:
try:
"\xb1".encode(sys.stdout.encoding)
pm = "\xb1"
except:
pass
return "min:{best} max:{worst} mean:{mean} {pm} {std} per loop (mean {pm} std. dev. of {runs} run{run_plural}, {loops:,} loop{loop_plural} each)".format(
pm=pm,
runs=self.repeat,
loops=self.loops,
loop_plural="" if self.loops == 1 else "s",
run_plural="" if self.repeat == 1 else "s",
mean="%.*g%s" % (self._precision, self.average, self.unit),
std="%.*g%s" % (self._precision, self.stdev, self.unit),
best="%.*g%s" % (self._precision, self.best, self.unit),
worst="%.*g%s" % (self._precision, self.worst, self.unit),
)
def _repr_pretty_(self, p, cycle):
unic = self.__str__()
p.text("<FPSResult : " + unic + ">")
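# A minimal sketch of driving the detector on a single frame, assuming an
# OpenCV-readable source at index 0; the variable names below are
# illustrative and not part of the class API:
if __name__ == "__main__":
    detector = AHSF(video_src=0)
    cap = cv2.VideoCapture(0)
    ret, frame = cap.read()
    cap.release()
    if ret:
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        vis, resized, cx, cy, d = detector.External_Run_AHSF(gray)
        print(f"pupil center ~({cx}, {cy}), w/h delta {d}")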

@ -0,0 +1,51 @@
; Script generated by the Inno Setup Script Wizard.
; SEE THE DOCUMENTATION FOR DETAILS ON CREATING INNO SETUP SCRIPT FILES!
#define MyAppName "EyeTrackVR"
#define MyAppVersion "0.2 BETA"
#define MyAppPublisher "EyeTrackVR"
#define MyAppURL "https://redhawk989.github.io/EyeTrackVR/"
#define MyAppExeName "eyetrackapp.exe"
[Setup]
; NOTE: The value of AppId uniquely identifies this application. Do not use the same AppId value in installers for other applications.
; (To generate a new GUID, click Tools | Generate GUID inside the IDE.)
AppId={{60B32C3C-819B-4685-AF40-49AB93539405}
AppName={#MyAppName}
AppVersion={#MyAppVersion}
;AppVerName={#MyAppName} {#MyAppVersion}
AppPublisher={#MyAppPublisher}
AppPublisherURL={#MyAppURL}
AppSupportURL={#MyAppURL}
AppUpdatesURL={#MyAppURL}
DefaultDirName={autopf}\{#MyAppName}
DisableProgramGroupPage=yes
; Uncomment the following line to run in non administrative install mode (install for current user only.)
;PrivilegesRequired=lowest
OutputBaseFilename=EyeTrackVR-Setup
SetupIconFile=C:\Users\beaul\OneDrive\Desktop\EyeTrackApp\Images\logo.ico
Compression=lzma/ultra64
SolidCompression=yes
WizardStyle=modern
[Languages]
Name: "english"; MessagesFile: "compiler:Default.isl"
[Tasks]
Name: "desktopicon"; Description: "{cm:CreateDesktopIcon}"; GroupDescription: "{cm:AdditionalIcons}";
[Files]
Source: "C:\Users\beaul\OneDrive\Desktop\EyeTrackVR\EyeTrackApp\dist\EyeTrackApp\{#MyAppExeName}"; DestDir: "{app}"; Flags: ignoreversion
Source: "C:\Users\beaul\OneDrive\Desktop\EyeTrackVR\EyeTrackApp\dist\EyeTrackApp\*"; DestDir: "{app}"; Flags: ignoreversion recursesubdirs createallsubdirs
; NOTE: Don't use "Flags: ignoreversion" on any shared system files
[Dirs]
Name: {app}; Permissions: users-full
[Icons]
Name: "{autoprograms}\{#MyAppName}"; Filename: "{app}\{#MyAppExeName}"
Name: "{autodesktop}\{#MyAppName}"; Filename: "{app}\{#MyAppExeName}"; Tasks: desktopicon
[Run]
Filename: "{app}\{#MyAppExeName}"; Description: "{cm:LaunchProgram,{#StringChange(MyAppName, '&', '&&')}}"; Flags: nowait postinstall skipifsilent

EyeTrackApp/Purple_Dot.png Normal file

EyeTrackApp/blink.py Normal file

@ -0,0 +1,78 @@
"""
------------------------------------------------------------------------------------------------------
,@@@@@@
@@@@@@@@@@@ @@@
@@@@@@@@@@@@ @@@@@@@@@@@
@@@@@@@@@@@@@ @@@@@@@@@@@@@@
@@@@@@@/ ,@@@@@@@@@@@@@
/@@@@@@@@@@@@@@@ @@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@ @@@@@
@@@@@@@@ @@@@@
,@@@ @@@@&
@@@@@@. @@@@
@@@ @@@@@@@@@/ @@@@@
,@@@. @@@@@@((@ @@@@(
//@@@ ,, @@@@ @@@@@
@@@( @@@@@@@
@@@ @ @@@@@@@@#
@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@(
Binary Intensity Based Blink by: Summer, Prohurtz
Algorithm App Implementations and tweaks By: Prohurtz
Copyright (c) 2025 EyeTrackVR <3
LICENSE: Summer Software Distribution License 1.0
------------------------------------------------------------------------------------------------------
"""
import numpy as np
def BLINK(self):
if self.blink_clear == True:
self.max_ints = []
self.max_int = 0
self.frames = 0
intensity = np.sum(self.current_image_gray_clean)
if self.calibration_frame_counter == 300:
self.filterlist = [] # clear filter
if len(self.filterlist) < 300:
self.filterlist.append(intensity)
else:
self.filterlist.pop(0)
self.filterlist.append(intensity)
    if (
        intensity >= np.percentile(self.filterlist, 99)
        or intensity <= np.percentile(self.filterlist, 1)
    ) and len(self.max_ints) >= 1:  # filter abnormally high or low values
        intensity = min(self.max_ints)
self.frames = self.frames + 1
if intensity > self.max_int:
self.max_int = intensity
if self.frames > 300: # TODO: test this number more (make it a setting??)
self.max_ints.append(self.max_int)
if intensity < self.min_int:
self.min_int = intensity
    blinkvalue = 0.8
    if len(self.max_ints) > 1 and intensity > min(self.max_ints):
        blinkvalue = 0.0
    return blinkvalue
# print(self.blinkvalue, self.max_int, self.min_int, self.frames, intensity)
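# A minimal standalone sketch of the same binary-intensity idea, assuming a
# stream of grayscale frames and a previously learned open-eye intensity
# floor (both names are illustrative, not the app's API):
def blink_from_intensity(frames, open_eye_min_intensity):
    for frame in frames:
        intensity = int(np.sum(frame))
        # brighter than the learned open-eye floor -> eye open (0.0), else blink (0.8)
        yield 0.0 if intensity > open_eye_min_intensity else 0.8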

EyeTrackApp/blob.py Normal file

@ -0,0 +1,82 @@
"""
------------------------------------------------------------------------------------------------------
,@@@@@@
@@@@@@@@@@@ @@@
@@@@@@@@@@@@ @@@@@@@@@@@
@@@@@@@@@@@@@ @@@@@@@@@@@@@@
@@@@@@@/ ,@@@@@@@@@@@@@
/@@@@@@@@@@@@@@@ @@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@ @@@@@
@@@@@@@@ @@@@@
,@@@ @@@@&
@@@@@@. @@@@
@@@ @@@@@@@@@/ @@@@@
,@@@. @@@@@@((@ @@@@(
//@@@ ,, @@@@ @@@@@
@@@( @@@@@@@
@@@ @ @@@@@@@@#
@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@(
BLOB By: Prohurtz
Algorithm App Implementations By: Prohurtz, qdot (Initial App Creator)
Copyright (c) 2025 EyeTrackVR <3
LICENSE: Babble Software Distribution License 1.0
------------------------------------------------------------------------------------------------------
"""
import cv2
def BLOB(self):
global cct
# define circle
_, larger_threshold = cv2.threshold(
self.current_image_gray,
int(self.settings.gui_threshold),
255,
cv2.THRESH_BINARY,
)
try:
# Try rebuilding our contours
contours, _ = cv2.findContours(
larger_threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
)
contours = sorted(contours, key=lambda x: cv2.contourArea(x), reverse=True)
# If we have no contours, we have nothing to blob track. Fail here.
if len(contours) == 0:
raise RuntimeError("No contours found for image")
    except:
        # No contours at all; count the failure and bail for this frame.
        self.failed = self.failed + 1
        return 0, 0, larger_threshold
rows, cols = larger_threshold.shape
for cnt in contours:
(x, y, w, h) = cv2.boundingRect(cnt)
# if our blob width/height are within boundaries, call that good.
if (
not self.settings.gui_blob_minsize <= h <= self.settings.gui_blob_maxsize
or not self.settings.gui_blob_minsize <= w <= self.settings.gui_blob_maxsize
):
continue
cx = x + int(w / 2)
cy = y + int(h / 2)
cv2.drawContours(self.current_image_gray, [cnt], -1, (0, 0, 0), 3)
cv2.rectangle(self.current_image_gray, (x, y), (x + w, y + h), (0, 0, 0), 2)
# out_x, out_y = cal_osc(self, cx, cy) #filter and calibrate values
self.failed = 0
return cx, cy, larger_threshold
self.failed = self.failed + 1
return 0, 0, larger_threshold
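# A minimal sketch of the same threshold-and-bound pipeline outside the app,
# assuming an 8-bit grayscale eye image; the threshold and size bounds are
# illustrative stand-ins for the GUI settings used above:
def blob_center(img_gray, threshold=80, min_size=10, max_size=100):
    _, binary = cv2.threshold(img_gray, threshold, 255, cv2.THRESH_BINARY)
    contours, _ = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in sorted(contours, key=cv2.contourArea, reverse=True):
        x, y, w, h = cv2.boundingRect(cnt)
        if min_size <= w <= max_size and min_size <= h <= max_size:
            return x + w // 2, y + h // 2  # centroid of the first acceptable blob
    return None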


@ -1,10 +1,63 @@
"""
------------------------------------------------------------------------------------------------------
,@@@@@@
@@@@@@@@@@@ @@@
@@@@@@@@@@@@ @@@@@@@@@@@
@@@@@@@@@@@@@ @@@@@@@@@@@@@@
@@@@@@@/ ,@@@@@@@@@@@@@
/@@@@@@@@@@@@@@@ @@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@ @@@@@
@@@@@@@@ @@@@@
,@@@ @@@@&
@@@@@@. @@@@
@@@ @@@@@@@@@/ @@@@@
,@@@. @@@@@@((@ @@@@(
//@@@ ,, @@@@ @@@@@
@@@( @@@@@@@
@@@ @ @@@@@@@@#
@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@(
Copyright (c) 2025 EyeTrackVR <3
LICENSE: Babble Software Distribution License 1.0
------------------------------------------------------------------------------------------------------
"""
import cv2
import numpy as np
import queue
import serial
import serial.tools.list_ports
import threading
import time
from colorama import Fore
from config import EyeTrackCameraConfig
from enum import Enum
import psutil, os
import sys
process = psutil.Process(os.getpid())  # adjust the priority of the capture process
try:
    sys.getwindowsversion()
except AttributeError:
    process.nice(10)  # UNIX: higher niceness = lower priority (-20 highest, 19 lowest)
else:
    process.nice(psutil.HIGH_PRIORITY_CLASS)  # Windows
# See https://learn.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-getpriorityclass#return-value for values
WAIT_TIME = 0.1
# Serial communication protocol:
# header-begin (2 bytes)
# header-type (2 bytes)
# packet-size (2 bytes)
# packet (packet-size bytes)
ETVR_HEADER = b"\xff\xa0"
ETVR_HEADER_FRAME = b"\xff\xa1"
ETVR_HEADER_LEN = 6
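# A minimal sketch of slicing one frame packet out of an accumulated byte
# buffer under the protocol above (the buffer handling is illustrative; the
# real reader below also refills the buffer from the serial port):
def parse_packet(buf: bytes):
    beg = buf.find(ETVR_HEADER + ETVR_HEADER_FRAME)
    if beg < 0:
        return None, buf  # no header yet, keep accumulating
    size = int.from_bytes(buf[beg + 4 : beg + 6], byteorder="little", signed=False)
    end = beg + ETVR_HEADER_LEN + size
    if len(buf) < end:
        return None, buf  # incomplete packet
    return buf[beg + ETVR_HEADER_LEN : end], buf[end:]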
class CameraState(Enum):
@ -13,16 +66,26 @@ class CameraState(Enum):
    DISCONNECTED = 2
def is_serial_capture_source(addr: str) -> bool:
"""
Returns True if the capture source address is a serial port.
"""
    return (
        addr.startswith("COM")          # Windows
        or addr.startswith("/dev/cu")   # macOS
        or addr.startswith("/dev/tty")  # Linux
    )
class Camera:
    def __init__(
        self,
        config: EyeTrackCameraConfig,
        camera_index: int,
        cancellation_event: "threading.Event",
        capture_event: "threading.Event",
        camera_status_outgoing: "queue.Queue[CameraState]",
        camera_output_outgoing: "queue.Queue(maxsize=20)",
    ):
        self.camera_status = CameraState.CONNECTING
        self.config = config
        self.camera_index = camera_index
@ -32,37 +95,87 @@ class Camera:
        self.capture_event = capture_event
        self.cancellation_event = cancellation_event
        self.current_capture_source = config.capture_source
        self.cv2_camera: "cv2.VideoCapture" = None
self.serial_connection = None
self.last_frame_time = time.time()
self.frame_number = 0
self.fps = 0
self.bps = 0
self.start = True
self.buffer = b""
self.pf_fps = 0
self.prevft = 0
self.newft = 0
self.fl = [0]
self.error_message = f"{Fore.YELLOW}[WARN] Capture source {{}} not found, retrying...{Fore.RESET}"
def __del__(self):
if self.serial_connection is not None:
self.serial_connection.close()
    def set_output_queue(self, camera_output_outgoing: "queue.Queue"):
        self.camera_output_outgoing = camera_output_outgoing
    def run(self):
        OPENCV_PARAMS = [
            cv2.CAP_PROP_OPEN_TIMEOUT_MSEC,
            5000,
            cv2.CAP_PROP_READ_TIMEOUT_MSEC,
            5000,
        ]
        while True:
            if self.cancellation_event.is_set():
                print(f"{Fore.CYAN}[INFO] Exiting Capture thread{Fore.RESET}")
                # openCV won't switch to a new source if provided with one,
                # so we have to manually release the camera on exit
                addr = str(self.current_capture_source)
                if is_serial_capture_source(addr):
                    pass  # TODO: find a nicer way to stop the com port
                    # self.serial_connection.close()
                else:
                    self.cv2_camera.release()
                return
            should_push = True
            # If things aren't open, retry until they are. Don't let read requests come in any earlier
            # than this, otherwise we can deadlock ourselves.
            if self.config.capture_source != None and self.config.capture_source != "":
                self.current_capture_source = self.config.capture_source
                addr = str(self.current_capture_source)
                if is_serial_capture_source(addr):
                    if (
                        self.serial_connection is None
                        or self.camera_status == CameraState.DISCONNECTED
                        or self.config.capture_source != self.current_capture_source
                    ):
                        port = self.config.capture_source
                        self.current_capture_source = port
                        self.start_serial_connection(port)
                else:
                    if (
                        self.cv2_camera is None
                        or not self.cv2_camera.isOpened()
                        or self.camera_status == CameraState.DISCONNECTED
                        or self.config.capture_source != self.current_capture_source
                    ):
                        print(self.error_message.format(self.config.capture_source))
                        # This requires a wait, otherwise we can error and possibly screw up the camera
                        # firmware. Fickle things.
                        if self.cancellation_event.wait(WAIT_TIME):
                            return
                        self.current_capture_source = self.config.capture_source
                        # self.cv2_camera = cv2.VideoCapture(self.current_capture_source)
                        self.cv2_camera = cv2.VideoCapture()
                        self.cv2_camera.setExceptionMode(True)
                        # https://github.com/opencv/opencv/blob/4.8.0/modules/videoio/include/opencv2/videoio.hpp#L803
                        self.cv2_camera.open(self.current_capture_source)
                        should_push = False
            else:
                # We don't have a capture source to try yet, wait for one to show up in the GUI.
                if self.cancellation_event.wait(WAIT_TIME):
@ -70,39 +183,159 @@ class Camera:
                    return
            # Assuming we can access our capture source, wait for another thread to request a capture.
            # Cycle every so often to see if our cancellation token has fired. This basically uses a
            # python event as a context-less, resettable one-shot channel.
            if should_push and not self.capture_event.wait(timeout=0.001):
                continue
            if self.config.capture_source != None:
                addr = str(self.current_capture_source)
                if is_serial_capture_source(addr):
                    self.get_serial_camera_picture(should_push)
                else:
                    self.get_cv2_camera_picture(should_push)
                if not should_push:
                    # if we get all the way down here, consider ourselves connected
                    self.camera_status = CameraState.CONNECTED

    def get_cv2_camera_picture(self, should_push):
        try:
            ret, image = self.cv2_camera.read()
height, width = image.shape[:2] # Calculate the aspect ratio
if int(width) > 680:
aspect_ratio = float(width) / float(
height
) # Determine the new height based on the desired maximum width
new_height = int(680 / aspect_ratio)
image = cv2.resize(image, (680, new_height))
            if not ret:
                self.cv2_camera.set(cv2.CAP_PROP_POS_FRAMES, 0)
                raise RuntimeError("Problem while getting frame")
            frame_number = self.cv2_camera.get(cv2.CAP_PROP_POS_FRAMES)
            current_frame_time = time.time()
            delta_time = current_frame_time - self.last_frame_time
            if delta_time > 0:
                current_fps = 1 / delta_time
            else:
                current_fps = 0
            self.last_frame_time = current_frame_time
            if len(self.fl) < 60:
                self.fl.append(current_fps)
            else:
                self.fl.pop(0)
                self.fl.append(current_fps)
            self.fps = sum(self.fl) / len(self.fl)
            self.bps = image.nbytes * self.fps
            if should_push:
                self.push_image_to_queue(image, frame_number, self.fps)
        except:
            print(
                f"{Fore.YELLOW}[WARN] Capture source problem, assuming camera disconnected, waiting for reconnect.{Fore.RESET}"
            )
            self.camera_status = CameraState.DISCONNECTED
            pass
def get_next_packet_bounds(self):
beg = -1
while beg == -1:
self.buffer += self.serial_connection.read(2048)
beg = self.buffer.find(ETVR_HEADER + ETVR_HEADER_FRAME)
# Discard any data before the frame header.
if beg > 0:
self.buffer = self.buffer[beg:]
beg = 0
# We know exactly how long the jpeg packet is
end = int.from_bytes(self.buffer[4:6], signed=False, byteorder="little")
self.buffer += self.serial_connection.read(end - len(self.buffer))
return beg, end
def get_next_jpeg_frame(self):
beg, end = self.get_next_packet_bounds()
jpeg = self.buffer[beg + ETVR_HEADER_LEN : end + ETVR_HEADER_LEN]
self.buffer = self.buffer[end + ETVR_HEADER_LEN :]
return jpeg
def get_serial_camera_picture(self, should_push):
conn = self.serial_connection
if conn is None:
return
try:
if conn.in_waiting:
jpeg = self.get_next_jpeg_frame()
if jpeg:
# Create jpeg frame from byte string
image = cv2.imdecode(np.frombuffer(jpeg, dtype=np.uint8), cv2.IMREAD_UNCHANGED)
if image is None:
print(f"{Fore.YELLOW}[WARN] Frame drop. Corrupted JPEG.{Fore.RESET}")
return
# Discard the serial buffer. This is due to the fact that it
# may build up some outdated frames. A bit of a workaround here tbh.
if conn.in_waiting >= 32768:
print(f"{Fore.CYAN}[INFO] Discarding the serial buffer ({conn.in_waiting} bytes){Fore.RESET}")
conn.reset_input_buffer()
self.buffer = b""
# Calculate the fps.
current_frame_time = time.time()
delta_time = current_frame_time - self.last_frame_time
self.last_frame_time = current_frame_time
self.fps = (self.fps + self.pf_fps) / 2
self.newft = time.time()
self.fps = 1 / (self.newft - self.prevft)
self.prevft = self.newft
self.fps = int(self.fps)
if len(self.fl) < 60:
self.fl.append(self.fps)
else:
self.fl.pop(0)
self.fl.append(self.fps)
self.fps = sum(self.fl) / len(self.fl)
self.bps = image.nbytes * self.fps
self.frame_number = self.frame_number + 1
if should_push:
self.push_image_to_queue(image, self.frame_number, self.fps)
except Exception:
print(
f"{Fore.YELLOW}[WARN] Serial capture source problem, assuming camera disconnected, waiting for reconnect.{Fore.RESET}"
)
conn.close()
self.camera_status = CameraState.DISCONNECTED
pass
def start_serial_connection(self, port):
if self.serial_connection is not None and self.serial_connection.is_open:
# Do nothing. The connection is already open on this port.
if self.serial_connection.port == port:
return
# Otherwise, close the connection before trying to reopen.
self.serial_connection.close()
com_ports = [tuple(p) for p in list(serial.tools.list_ports.comports())]
# Do not try connecting if the port is missing, i.e. the device was unplugged.
if not any(p for p in com_ports if port in p):
return
try:
rate = 115200 if sys.platform == "darwin" else 3000000 # Higher baud rate not working on macOS
conn = serial.Serial(baudrate=rate, port=port, xonxoff=False, dsrdtr=False, rtscts=False)
# Set explicit buffer size for serial.
if sys.platform == "win32":
buffer_size = 32768
conn.set_buffer_size(rx_size=buffer_size, tx_size=buffer_size)
print(f"{Fore.CYAN}[INFO] ETVR Serial Tracker device connected on {port}{Fore.RESET}")
self.serial_connection = conn
self.camera_status = CameraState.CONNECTED
except Exception:
print(f"{Fore.CYAN}[INFO] Failed to connect on {port}{Fore.RESET}")
self.camera_status = CameraState.DISCONNECTED
    def push_image_to_queue(self, image, frame_number, fps):
        # If there's backpressure, just yell. We really shouldn't have this unless we start getting
        # some sort of capture event conflict though.
        qsize = self.camera_output_outgoing.qsize()
        if qsize > 1:
            print(
                f"{Fore.YELLOW}[WARN] CAPTURE QUEUE BACKPRESSURE OF {qsize}. CHECK FOR CRASH OR TIMING ISSUES IN ALGORITHM.{Fore.RESET}"
            )
        self.camera_output_outgoing.put((image, frame_number, fps))
        self.capture_event.clear()


@ -1,22 +1,54 @@
"""
------------------------------------------------------------------------------------------------------
,@@@@@@
@@@@@@@@@@@ @@@
@@@@@@@@@@@@ @@@@@@@@@@@
@@@@@@@@@@@@@ @@@@@@@@@@@@@@
@@@@@@@/ ,@@@@@@@@@@@@@
/@@@@@@@@@@@@@@@ @@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@ @@@@@
@@@@@@@@ @@@@@
,@@@ @@@@&
@@@@@@. @@@@
@@@ @@@@@@@@@/ @@@@@
,@@@. @@@@@@((@ @@@@(
//@@@ ,, @@@@ @@@@@
@@@( @@@@@@@
@@@ @ @@@@@@@@#
@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@(
Copyright (c) 2025 EyeTrackVR <3
LICENSE: Babble Software Distribution License 1.0
------------------------------------------------------------------------------------------------------
"""
import PySimpleGUI as sg
from config import EyeTrackConfig
from collections import deque
from threading import Event, Thread
import math
from eye import EyeId
from eye_processor import EyeProcessor, EyeInfoOrigin
from queue import Queue, Empty
from camera import Camera, CameraState
import cv2
from osc.OSCMessage import OSCMessageType, OSCMessage
from utils.misc_utils import PlaySound, SND_FILENAME, SND_ASYNC, resource_path
import numpy as np

# for clarity when indexing
X = 0
Y = 1


class CameraWidget:
    def __init__(self, widget_id: EyeId, main_config: EyeTrackConfig, osc_queue: Queue):
        self.gui_camera_addr = f"-CAMERAADDR{widget_id}-"
        self.gui_rotation_slider = f"-ROTATIONSLIDER{widget_id}-"
self.gui_rotation_ui_padding = f"-ROTATIONUIPADDING{widget_id}-"
self.gui_roi_button = f"-ROIMODE{widget_id}-" self.gui_roi_button = f"-ROIMODE{widget_id}-"
self.gui_roi_layout = f"-ROILAYOUT{widget_id}-" self.gui_roi_layout = f"-ROILAYOUT{widget_id}-"
self.gui_roi_selection = f"-GRAPH{widget_id}-" self.gui_roi_selection = f"-GRAPH{widget_id}-"
@ -24,13 +56,18 @@ class CameraWidget:
        self.gui_save_tracking_button = f"-SAVETRACKINGBUTTON{widget_id}-"
        self.gui_tracking_layout = f"-TRACKINGLAYOUT{widget_id}-"
        self.gui_tracking_image = f"-IMAGE{widget_id}-"
self.gui_tracking_fps = f"-TRACKINGFPS{widget_id}-"
self.gui_tracking_bps = f"-TRACKINGBPS{widget_id}-"
        self.gui_output_graph = f"-OUTPUTGRAPH{widget_id}-"
        self.gui_restart_calibration = f"-RESTARTCALIBRATION{widget_id}-"
        self.gui_stop_calibration = f"-STOPCALIBRATION{widget_id}-"
        self.gui_recenter_eyes = f"-RECENTEREYES{widget_id}-"
        self.gui_mode_readout = f"-APPMODE{widget_id}-"
        self.gui_roi_message = f"-ROIMESSAGE{widget_id}-"
        self.gui_mask_markup = f"-MARKUP{widget_id}-"
        self.gui_mask_lighten = f"-LIGHTEN{widget_id}-"
        self.last_eye_info = None
        self.osc_queue = osc_queue
        self.main_config = main_config
        self.eye_id = widget_id
@ -43,91 +80,7 @@ class CameraWidget:
        elif self.eye_id == EyeId.LEFT:
            self.config = main_config.left_eye
        else:
            raise RuntimeError("\033[91m[WARN] Cannot have a camera widget represent both eyes!\033[0m")
self.roi_layout = [
[
sg.Graph(
(640, 480),
(0, 480),
(640, 0),
key=self.gui_roi_selection,
drag_submits=True,
enable_events=True,
background_color='#424042',
)
]
]
# Define the window's contents
self.tracking_layout = [
[
sg.Text("Threshold", background_color='#424042'),
sg.Slider(
range=(0, 110),
default_value=self.config.threshold,
orientation="h",
key=self.gui_threshold_slider,
background_color='#424042'
),
],
[
sg.Text("Rotation", background_color='#424042'),
sg.Slider(
range=(0, 360),
default_value=self.config.rotation_angle,
orientation="h",
key=self.gui_rotation_slider,
background_color='#424042'
),
],
[
sg.Button("Restart Calibration", key=self.gui_restart_calibration, button_color='#6f4ca1'),
sg.Button("Recenter Eyes", key=self.gui_recenter_eyes, button_color='#6f4ca1'),
],
[
sg.Text("Mode:", background_color='#424042'),
sg.Text("Calibrating", key=self.gui_mode_readout, background_color='#424042'),
sg.Checkbox(
"Circle crop:",
default=self.config.gui_circular_crop,
key=self.gui_circular_crop,
background_color='#424042',
),
],
[sg.Image(filename="", key=self.gui_tracking_image)],
[
sg.Graph(
(200, 200),
(-100, 100),
(100, -100),
background_color="white",
key=self.gui_output_graph,
drag_submits=True,
enable_events=True,
),
sg.Text("Please set an Eye Cropping.", key=self.gui_roi_message, background_color='#424042', visible=False),
],
]
self.widget_layout = [
[
sg.Text("Camera Address", background_color='#424042'),
sg.InputText(self.config.capture_source, key=self.gui_camera_addr),
],
[
sg.Button("Save and Restart Tracking", key=self.gui_save_tracking_button, button_color='#6f4ca1'),
],
[
sg.Button("Tracking Mode", key=self.gui_tracking_button, button_color='#6f4ca1'),
sg.Button("Cropping Mode", key=self.gui_roi_button, button_color='#6f4ca1'),
],
[
sg.Column(self.tracking_layout, key=self.gui_tracking_layout, background_color='#424042'),
sg.Column(self.roi_layout, key=self.gui_roi_layout, background_color='#424042', visible=False),
],
]
        self.cancellation_event = Event()
        # Set the event until start is called, otherwise we can block if shutdown is called.
@ -141,11 +94,13 @@ class CameraWidget:
        self.ransac = EyeProcessor(
            self.config,
            self.settings_config,
            main_config,
            self.cancellation_event,
            self.capture_event,
            self.capture_queue,
            self.image_queue,
            self.eye_id,
            self.osc_queue,
        )
        self.camera_status_queue = Queue()
@ -158,11 +113,210 @@ class CameraWidget:
            self.capture_queue,
        )
        self.hover = None
        # cartesian co-ordinates in widget space are used during selection
self.xy0 = None
self.xy1 = None
self.cartesian_needs_update = False
# polar co-ordinates from the image center are the canonical representation
self.cr, self.ca = None, None
self.roi_size = None
self.clip_size = None
self.clip_pos = None
self.padded_size = [244, 244]
self.img_pos = None
self.roi_image_center = None
        self.is_mouse_up = True
self.hover_pos = None
        self.in_roi_mode = False
self.movavg_fps_queue = deque(maxlen=120)
self.movavg_bps_queue = deque(maxlen=120)
def get_widget_layout(self):
self.widget_layout = [
[
sg.Text("Camera Address", background_color="#424042"),
sg.InputText(
self.config.capture_source,
key=self.gui_camera_addr,
tooltip="Enter the IP address or UVC port of your camera. (Include the 'http://')",
),
],
[
sg.Button(
"Save and Restart Tracking",
key=self.gui_save_tracking_button,
button_color="#6f4ca1",
),
],
[
sg.Button(
"Tracking Mode",
key=self.gui_tracking_button,
button_color="#6f4ca1",
tooltip="Go here to track your eye.",
),
sg.Button(
"Cropping Mode",
key=self.gui_roi_button,
button_color="#6f4ca1",
tooltip="Go here to crop out your eye.",
),
],
[
sg.Column(
self.tracking_layout,
key=self.gui_tracking_layout,
background_color="#424042",
),
sg.Column(
self.roi_layout,
key=self.gui_roi_layout,
background_color="#424042",
visible=False,
),
],
]
def get_roi_layout(self):
self.roi_layout = [
[
# sg.Button(
# "Mark Out",
# key=self.gui_mask_markup,
# button_color="#6f4ca1",
# tooltip="Mark out stuff that is not your eye.",
# ),
# sg.Button(
# "Lighten",
# key=self.gui_mask_lighten,
# button_color="#6f4ca1",
# tooltip="Lighten shadowed areas.",
# ),
sg.Text("Rotation", background_color="#424042"),
sg.Slider(
range=(0, 360),
default_value=self.config.rotation_angle,
orientation="h",
key=self.gui_rotation_slider,
background_color="#424042",
tooltip="Adjust the rotation of your cameras, make them level.",
),
sg.Checkbox(
"Camera Widget Padding",
default=self.config.gui_rotation_ui_padding,
tooltip="Pad the camera view widget enough to allow a full rotation.",
key=self.gui_rotation_ui_padding,
background_color="#424042",
),
],
[
sg.Graph(
(640, 480),
(0, 480),
(640, 0),
key=self.gui_roi_selection,
drag_submits=True,
enable_events=True,
motion_events=True,
background_color="#424042",
),
],
]
def get_tracking_layout(self):
# Define the window's contents
self.tracking_layout = [
[
sg.Button(
"Start Calibration",
key=self.gui_restart_calibration,
button_color="#6f4ca1",
tooltip="Start eye calibration. Look all arround to all extreams without blinking until sound is heard.",
),
sg.Button(
"Stop Calibration",
key=self.gui_stop_calibration,
button_color="#6f4ca1",
tooltip="Stop eye calibration manualy.",
),
sg.Button(
"Recenter Eyes",
key=self.gui_recenter_eyes,
button_color="#6f4ca1",
tooltip="Make your eyes center again.",
),
],
[
sg.Text("Mode:", background_color="#424042"),
sg.Text("Calibrating", key=self.gui_mode_readout, background_color="#424042"),
sg.Text("", key=self.gui_tracking_fps, background_color="#424042"),
sg.Text("", key=self.gui_tracking_bps, background_color="#424042"),
# sg.Checkbox(
# "Circle crop:",
# default=self.config.gui_circular_crop,
# key=self.gui_circular_crop,
# background_color='#424042',
# tooltip = "Circle crop only applies to RANSAC3D and Blob.",
# ),
],
[sg.Image(filename="", key=self.gui_tracking_image)],
[
sg.Graph(
(200, 200),
(-100, 100),
(100, -100),
background_color="white",
key=self.gui_output_graph,
drag_submits=True,
enable_events=True,
),
sg.Text(
"Please set an Eye Cropping.",
key=self.gui_roi_message,
background_color="#424042",
visible=False,
),
],
]
def update_layouts(self):
self.get_roi_layout()
self.get_tracking_layout()
self.get_widget_layout()
def _movavg_fps(self, next_fps):
self.movavg_fps_queue.append(next_fps)
fps = round(sum(self.movavg_fps_queue) / len(self.movavg_fps_queue))
millisec = round((1 / fps if fps else 0) * 1000)
return f"{fps} Fps {millisec} ms"
def _movavg_bps(self, next_bps):
self.movavg_bps_queue.append(next_bps)
return f"{sum(self.movavg_bps_queue) / len(self.movavg_bps_queue) * 0.001 * 0.001 * 8:.3f} Mbps"
def _cartesian_to_polar(self):
if not (self.xy0 is None or self.xy1 is None):
roi_center = (self.xy0 + self.xy1) / 2 - self.roi_image_center
self.cr = np.linalg.norm(roi_center)
self.ca = math.atan2(roi_center[Y], roi_center[X]) + math.radians(self.config.rotation_angle)
self.roi_size = np.abs(self.xy1 - self.xy0)
def _polar_to_cartesian_at_angle(self, rotation_angle_radians):
if not (self.cr is None or self.ca is None or self.roi_size is None):
ca = self.ca - rotation_angle_radians
cx = math.cos(ca) * self.cr + self.roi_image_center[X]
cy = math.sin(ca) * self.cr + self.roi_image_center[Y]
roi_pos = np.array((int(cx), int(cy))) - self.roi_size // 2
return (roi_pos, roi_pos + self.roi_size)
else:
return (None, None)
def _polar_to_cartesian(self):
if not (self.cr is None or self.ca is None or self.roi_size is None):
(self.xy0), (self.xy1) = self._polar_to_cartesian_at_angle(math.radians(self.config.rotation_angle))
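    # A small worked example, assuming a 100x100 padded view so
    # roi_image_center == (50, 50): a box dragged from (30, 30) to (50, 50)
    # has center offset (-10, -10), i.e. cr ~= 14.14 and ca ~= -135 degrees
    # plus the current rotation; _polar_to_cartesian_at_angle then re-derives
    # the same box after the camera rotation changes.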
    def started(self):
        return not self.cancellation_event.is_set()
@ -185,148 +339,354 @@ class CameraWidget:
        self.ransac_thread.join()
        self.camera_thread.join()
def on_config_update(self, data):
keys = set(data.keys())
model_keys = set(self.config.model_fields.keys())
# we only want to restart our stuff, if our stuff got updated
# at the model level
if model_keys.intersection(keys):
self.stop()
self.start()
def recenter_eyes(self):
self.settings.gui_recenter_eyes = True
def recalibrate_eyes(self):
self.ransac.calibration_frame_counter = self.settings.calibration_samples
self.ransac.ibo.clear_filter()
PlaySound(resource_path("Audio/start.wav"), SND_FILENAME | SND_ASYNC)
def osc_recenter_eyes(self, osc_message: OSCMessage):
if not isinstance(osc_message.data, bool):
            return  # just in case we get anything other than bool
if osc_message.data:
self.recenter_eyes()
def osc_recalibrate_eyes(self, osc_message: OSCMessage):
if not isinstance(osc_message.data, bool):
            return  # just in case we get anything other than bool
if osc_message.data:
self.recalibrate_eyes()
    def render(self, window, event, values):
        changed = False
        if self.settings.gui_disable_gui == False:
            # If anything has changed in our configuration settings, change/update those.
            if event == self.gui_save_tracking_button and values[self.gui_camera_addr] != self.config.capture_source:
                print("\033[94m[INFO] New value: {}\033[0m".format(values[self.gui_camera_addr]))
                try:
                    # Try storing ints as ints, for those using wired cameras.
                    self.config.capture_source = int(values[self.gui_camera_addr])
                except ValueError:
                    if values[self.gui_camera_addr] == "":
                        self.config.capture_source = None
                    else:
                        if (
                            len(values[self.gui_camera_addr]) > 5
                            and "http" not in values[self.gui_camera_addr]
                            and ".mp4" not in values[self.gui_camera_addr]
                            and "/dev" not in values[self.gui_camera_addr]
                        ):  # If http is not in camera address, add it.
                            self.config.capture_source = f"http://{values[self.gui_camera_addr]}/"
                        else:
                            self.config.capture_source = values[self.gui_camera_addr]
                changed = True
            if self.config.rotation_angle != int(values[self.gui_rotation_slider]):
                self.config.rotation_angle = int(values[self.gui_rotation_slider])
                changed = True
                self.cartesian_needs_update = True
            if self.config.gui_rotation_ui_padding != bool(values[self.gui_rotation_ui_padding]):
                self.config.gui_rotation_ui_padding = bool(values[self.gui_rotation_ui_padding])
                changed = True
                self.cartesian_needs_update = True
            # if self.config.gui_circular_crop != values[self.gui_circular_crop]:
            #     self.config.gui_circular_crop = values[self.gui_circular_crop]
            #     changed = True
            if changed:
                self.main_config.save()
        if event == self.gui_tracking_button:
            self.get_tracking_layout()
            print("\033[94m[INFO] Moving to tracking mode\033[0m")
            self.in_roi_mode = False
            self.camera.set_output_queue(self.capture_queue)
            window[self.gui_roi_layout].update(visible=False)
            window[self.gui_tracking_layout].update(visible=True)
        if event == self.gui_roi_button:
            self.get_roi_layout()
            print("\033[94m[INFO] Move to roi mode\033[0m")
            self.in_roi_mode = True
            self.camera.set_output_queue(self.roi_queue)
            window[self.gui_roi_layout].update(visible=True)
            window[self.gui_tracking_layout].update(visible=False)
        if event == "{}+UP".format(self.gui_roi_selection):
            # Event for mouse button up in ROI mode
            self.is_mouse_up = True
            print("UP")
            self.xy0 = np.clip(self.xy0, self.clip_pos, self.clip_pos + self.clip_size)
            self.xy1 = np.clip(self.xy1, self.clip_pos, self.clip_pos + self.clip_size)
            self._cartesian_to_polar()
            if all(abs(self.xy0 - self.xy1) != 0):
                xy0, xy1 = self._polar_to_cartesian_at_angle(0)
                self.config.roi_window_x, self.config.roi_window_y = (np.minimum(xy0, xy1) - self.img_pos).tolist()
                self.config.roi_window_w, self.config.roi_window_h = (np.abs(xy0 - xy1)).tolist()
                self.main_config.save()
        if event == self.gui_roi_selection:
            # Event for mouse button down or mouse drag in ROI mode
            self.hover_pos = None
            if self.is_mouse_up:
                self.is_mouse_up = False
                self.xy0 = np.array(values[self.gui_roi_selection])
            self.xy1 = np.array(values[self.gui_roi_selection])
            self._cartesian_to_polar()
        if event == "{}+MOVE".format(self.gui_roi_selection):
            if self.is_mouse_up:
                self.hover_pos = np.array(values[self.gui_roi_selection])
                if self.padded_size is not None:
                    if any(self.hover_pos > self.padded_size):
                        self.hover_pos = None
        if event == self.gui_restart_calibration:
            self.recalibrate_eyes()
        if event == self.gui_stop_calibration:
            self.ransac.calibration_frame_counter = 0
        if event == self.gui_recenter_eyes:
            self.recenter_eyes()
        needs_roi_set = self.config.roi_window_h <= 0 or self.config.roi_window_w <= 0
        # TODO: Refactor if statements below...
        window[self.gui_tracking_fps].update("")
        window[self.gui_tracking_bps].update("")
        if self.config.capture_source is None or self.config.capture_source == "":
            window[self.gui_mode_readout].update("Waiting for camera address")
            window[self.gui_roi_message].update(visible=False)
            window[self.gui_output_graph].update(visible=False)
        elif self.camera.camera_status == CameraState.CONNECTING:
            window[self.gui_mode_readout].update("Camera Connecting")
        elif self.camera.camera_status == CameraState.DISCONNECTED:
            window[self.gui_mode_readout].update("Camera Reconnecting...")
        elif needs_roi_set:
            window[self.gui_mode_readout].update("Awaiting Eye Crop")
        elif self.ransac.calibration_frame_counter != None:
            window[self.gui_mode_readout].update("Calibration")
        else:
            window[self.gui_mode_readout].update("Tracking")
            window[self.gui_tracking_fps].update(self._movavg_fps(self.camera.fps))
            window[self.gui_tracking_bps].update(self._movavg_bps(self.camera.bps))
# if event == self.gui_mask_lighten:
# while True:
# try:
# maybe_image = self.roi_queue.get(block=False)
# imgbytes = cv2.imencode(".ppm", maybe_image[0])[1].tobytes()
# image = cv2.imdecode(
# np.frombuffer(imgbytes, np.uint8), cv2.IMREAD_COLOR
# )
# cv2.imshow("Image", image)
# cv2.waitKey(1)
# cv2.destroyAllWindows()
# print("lighen")
# except Empty:
# pass
# if event == self.gui_mask_markup:
# print("markup")
if self.in_roi_mode:
try:
if self.roi_queue.empty():
self.capture_event.set()
maybe_image = self.roi_queue.get(block=False)
if maybe_image:
image = maybe_image[0]
img_h, img_w, _ = image.shape
hyp = math.ceil((img_w**2 + img_h**2) ** 0.5)
rotation_matrix = cv2.getRotationMatrix2D(
((img_w / 2), (img_h / 2)), self.config.rotation_angle, 1
)
# calculate position of all four corners of image
# calculate crop corner locations in original image space
x_coords, y_coords = np.matmul(
rotation_matrix,
np.transpose([[0, 0, 1], [img_w, 0, 1], [0, img_h, 1], [img_w, img_h, 1]]),
)
self.clip_size = np.array(
[math.ceil(max(x_coords) - min(x_coords)), math.ceil(max(y_coords) - min(y_coords))]
)
if self.config.gui_rotation_ui_padding:
self.padded_size = np.array([hyp, hyp])
else:
self.padded_size = self.clip_size
self.img_pos = ((self.padded_size - (img_w, img_h)) / 2).astype(np.int32)
self.clip_pos = ((self.padded_size - self.clip_size) / 2).astype(np.int32)
self.roi_image_center = self.padded_size / 2
# deferred to after roi_image_center is updated
if self.cartesian_needs_update:
self._polar_to_cartesian()
self.cartesian_needs_update = False
pad_matrix = np.float32([[1, 0, self.img_pos[X]], [0, 1, self.img_pos[Y]], [0, 0, 1]])
rotation_matrix_padded = cv2.getRotationMatrix2D(
self.roi_image_center, self.config.rotation_angle, 1
)
matrix = np.matmul(rotation_matrix_padded, pad_matrix)
image = cv2.warpAffine(
image,
matrix,
self.padded_size,
borderMode=cv2.BORDER_CONSTANT,
borderValue=(128, 128, 128),
)
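# Note: composing the pad translation with the rotation about the padded-canvas
# centre yields a single 2x3 affine, so one warpAffine call both recentres the raw
# frame on the padded canvas and rotates it, filling the border with neutral grey.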
maybe_image = (image, *maybe_image[1:])
imgbytes = cv2.imencode(".ppm", maybe_image[0])[1].tobytes()
graph = window[self.gui_roi_selection]
# INCREDIBLY IMPORTANT ERASE. Drawing images does NOT overwrite the buffer, the fucking
# graph keeps every image fed in until you call this. Therefore we have to make sure we
# erase before we redraw, otherwise we'll leak memory *very* quickly.
graph.erase()
graph.draw_image(data=imgbytes, location=(0, 0))
def make_dashed(spawn_item, dark="#000000", light="#ffffff", duty=1):
pixel_duty = math.floor(4 * duty)
for (color, dashoffset) in [(dark, 0), (light, 4)]:
item = spawn_item(color)
graph._TKCanvas2.itemconfig(item, dash=(pixel_duty, 8 - pixel_duty), dashoffset=dashoffset)
if self.xy0 is None or self.xy1 is None:
# roi_window rotates around roi center, we rotate around image center
# TODO: it would be nice if they were more consistent
roi_window_pos = (self.config.roi_window_x, self.config.roi_window_y)
roi_window_size = (self.config.roi_window_w, self.config.roi_window_h)
self.xy0 = roi_window_pos + self.img_pos
self.xy1 = self.xy0 + roi_window_size
self._cartesian_to_polar()
self.ca -= math.radians(self.config.rotation_angle)
self._polar_to_cartesian()
style = {}
if self.is_mouse_up:
style = {"dark": "#7f78ff", "light": "#d002ff", "duty": 0.5}
make_dashed(
lambda color: graph.draw_rectangle(
self.xy0,
self.xy1,
line_color=color,
),
**style,
)
if self.is_mouse_up and self.hover_pos is not None:
make_dashed(
lambda color: graph.draw_line(
(self.hover_pos[X], 0), (self.hover_pos[X], self.padded_size[Y]), color=color
)
)
make_dashed(
lambda color: graph.draw_line(
(0, self.hover_pos[Y]), (self.padded_size[X], self.hover_pos[Y]), color=color
)
)
except Empty:
pass
else:
if needs_roi_set:
window[self.gui_roi_message].update(visible=True)
window[self.gui_output_graph].update(visible=False)
return
try:
window[self.gui_roi_message].update(visible=False)
window[self.gui_output_graph].update(visible=True)
(maybe_image, eye_info) = self.image_queue.get(block=False)
imgbytes = cv2.imencode(".ppm", maybe_image)[1].tobytes()
window[self.gui_tracking_image].update(data=imgbytes)
# Update the GUI
graph = window[self.gui_output_graph]
graph.erase()
if eye_info.info_type != EyeInfoOrigin.FAILURE: # and not eye_info.blink:
graph.update(background_color="white")
if not np.isnan(eye_info.x) and not np.isnan(eye_info.y):
graph.draw_circle(
(eye_info.x * -100, eye_info.y * -100),
eye_info.pupil_dilation * 25,
fill_color="black",
line_color="white",
)
else:
graph.draw_circle(
(0.0 * -100, 0.0 * -100),
20,
fill_color="black",
line_color="white",
)
if not np.isnan(eye_info.blink):
graph.draw_line(
(-100, 100), # Start at the bottom (-100)
(-100, (eye_info.blink * 200) - 100), # Scale and adjust to the -100 to 100 range
color="#6f4ca1",
width=10,
)
else:
graph.draw_line((-100, 0.5 * 200), (-100, 100), color="#6f4ca1", width=10)
if eye_info.blink <= 0.0:
graph.update(background_color="#6f4ca1")
elif eye_info.info_type == EyeInfoOrigin.FAILURE:
graph.update(background_color="red")
except Empty:
pass
else:
try:
window[self.gui_roi_message].update(visible=False)
window[self.gui_output_graph].update(visible=True)
(maybe_image, eye_info) = self.image_queue.get(block=False)
imgbytes = cv2.imencode(".ppm", maybe_image)[1].tobytes()
window[self.gui_tracking_image].update(data=imgbytes)

# Update the GUI
graph = window[self.gui_output_graph]
graph.erase()
if eye_info.info_type != InformationOrigin.FAILURE and not eye_info.blink:
graph.update(background_color="white")
try:
graph.draw_circle(
(eye_info.x * -100, eye_info.y * -100),
25,
fill_color="black",
line_color="white",
)
except:
pass
elif eye_info.blink:
graph.update(background_color="#6f4ca1")
elif eye_info.info_type == InformationOrigin.FAILURE:
graph.update(background_color="red")

# Relay information to OSC
if eye_info.info_type != InformationOrigin.FAILURE:
self.osc_queue.put((self.eye_id, eye_info))
except Empty:
pass

View File

@ -1,28 +1,145 @@
"""
------------------------------------------------------------------------------------------------------
,@@@@@@
@@@@@@@@@@@ @@@
@@@@@@@@@@@@ @@@@@@@@@@@
@@@@@@@@@@@@@ @@@@@@@@@@@@@@
@@@@@@@/ ,@@@@@@@@@@@@@
/@@@@@@@@@@@@@@@ @@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@ @@@@@
@@@@@@@@ @@@@@
,@@@ @@@@&
@@@@@@. @@@@
@@@ @@@@@@@@@/ @@@@@
,@@@. @@@@@@((@ @@@@(
//@@@ ,, @@@@ @@@@@
@@@( @@@@@@@
@@@ @ @@@@@@@@#
@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@(
Copyright (c) 2025 EyeTrackVR <3
LICENSE: Babble Software Distribution License 1.0
------------------------------------------------------------------------------------------------------
"""
import json
import os.path
import shutil
from colorama import Fore
from pydantic import BaseModel
from typing import Any, Union, List
import os
from eye import EyeId

CONFIG_FILE_NAME: str = "eyetrack_settings.json"
BACKUP_CONFIG_FILE_NAME: str = "eyetrack_settings.backup"
class EyeTrackCameraConfig(BaseModel):
gui_rotation_ui_padding: bool = True
rotation_angle: int = 0
roi_window_x: int = 0
roi_window_y: int = 0
roi_window_w: int = 240
roi_window_h: int = 240
focal_length: int = 30
capture_source: Union[int, str, None] = None
calib_XMAX: Union[float, None] = None
calib_XMIN: Union[float, None] = None
calib_YMAX: Union[float, None] = None
calib_YMIN: Union[float, None] = None
calib_XOFF: Union[float, None] = None
calib_YOFF: Union[float, None] = None
calibration_points: List[List[Union[float, None]]] = []
calibration_points_3d: List[List[Union[float, None]]] = []
leap_calibration_percentile_90: float = 0
leap_calibration_percentile_2: float = 0
leap_calibrated: bool = False
def update_capture_source(self, new_camera_address: str):
if not new_camera_address:
self.capture_source = None
return
if new_camera_address.isnumeric():
self.capture_source = int(new_camera_address)
return
# we were probably passed an IP; let's add http:// to it
if len(new_camera_address) > 5 and not (
new_camera_address.startswith(("http", "/dev")) or new_camera_address.endswith(".mp4")
):
self.capture_source = f"http://{new_camera_address}"
return
self.capture_source = new_camera_address
def update(self, data: dict[str, Any]) -> bool:
"""
Updates the model one field at a time based on the provided data dict.
The dict has to be defined like
```
data = {
"model_field": value
}
```
If stale data is provided,
e.g. the user clicked "save and restart" but didn't provide a new field,
we skip it, assuming it was just a call to restart the tracking, or a mis-click.
Some fields may require more validation; we take care of that with special methods.
Defining a method like
```
def update_custom_field(value: type):
pass
```
will cause it to be picked up by this method and called with the current value.
Return values are ignored.
"""
for key, value in data.items():
old_value = getattr(self, key, None)
# no reason to update if it's the same value
if old_value == value:
return False
if hasattr(self, f"update_{key}"):
update_attr = getattr(self, f"update_{key}")
if callable(update_attr):
update_attr(value)
else:
setattr(self, "key", value)
return True
else:
print(f"\033[93m[WARN] Field {key} does not exist on {self}.\033[0m")
return False
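# A small usage sketch (illustrative, not part of the app): plain fields go through
# setattr, capture_source is routed to its update_capture_source validator, and
# unchanged values are skipped as stale.
#
# cam = EyeTrackCameraConfig()
# cam.update({"rotation_angle": 90})   # True: plain field, set via setattr
# cam.update({"rotation_angle": 90})   # False: same value again, skipped
# cam.update({"capture_source": "1"})  # True: validated and stored as the integer source 1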
class EyeTrackSettingsConfig(BaseModel):
gui_flip_x_axis_left: bool = False
gui_flip_x_axis_right: bool = False
gui_flip_y_axis: bool = False
gui_RANSAC3D: bool = False
gui_HSF: bool = False
gui_BLOB: bool = False
gui_BLINK: bool = False
gui_HSRAC: bool = False
gui_AHSFRAC: bool = False
gui_AHSF: bool = False
gui_DADDY: bool = False
gui_LEAP: bool = True
gui_HSF_radius: int = 15
gui_HSF_radius_left: int = 10
gui_HSF_radius_right: int = 10
gui_min_cutoff: str = "0.0004" gui_min_cutoff: str = "0.0004"
gui_speed_coefficient: str = "0.9" gui_speed_coefficient: str = "0.9"
gui_osc_address: str = "127.0.0.1" gui_osc_address: str = "127.0.0.1"
@ -33,9 +150,72 @@ class EyeTrackSettingsConfig(BaseModel):
gui_blob_maxsize: float = 25
gui_blob_minsize: float = 10
gui_recenter_eyes: bool = False
gui_3d_calibration: bool = False
grab_3d_point: bool = False
tracker_single_eye: int = 0
gui_threshold: int = 65
gui_AHSFRACP: int = 1
gui_AHSFP: int = 2
gui_HSRACP: int = 3
gui_HSFP: int = 4
gui_DADDYP: int = 5
gui_RANSAC3DP: int = 6
gui_BLOBP: int = 7
gui_LEAPP: int = 8
gui_IBO: bool = False
gui_skip_autoradius: bool = False
gui_thresh_add: int = 11
gui_update_check: bool = True
gui_ROSC: bool = False
gui_circular_crop_right: bool = False
gui_circular_crop_left: bool = False
ibo_filter_samples: int = 400
ibo_average_output_samples: int = 0
ibo_fully_close_eye_threshold: float = 0.3
leap_calibration_samples: int = 2000
calibration_samples: int = 600
osc_right_eye_close_address: str = "/avatar/parameters/RightEyeLidExpandedSqueeze"
osc_left_eye_close_address: str = "/avatar/parameters/LeftEyeLidExpandedSqueeze"
osc_left_eye_x_address: str = "/avatar/parameters/LeftEyeX"
osc_right_eye_x_address: str = "/avatar/parameters/RightEyeX"
osc_eyes_y_address: str = "/avatar/parameters/EyesY"
osc_eyes_pupil_dilation_address: str = "/avatar/parameters/EyesDilation"
osc_invert_eye_close: bool = False
gui_RANSACBLINK: bool = False
gui_disable_gui: bool = False
gui_right_eye_dominant: bool = False
gui_left_eye_dominant: bool = False
gui_outer_side_falloff: bool = False
gui_eye_dominant_diff_thresh: float = 0.3
gui_legacy_ransac: bool = False
gui_legacy_ransac_thresh_right: int = 80
gui_legacy_ransac_thresh_left: int = 80
gui_LEAP_lid: bool = True
gui_osc_vrcft_v1: bool = False
gui_osc_vrcft_v2: bool = False
gui_vrc_native: bool = True
gui_pupil_dilation: bool = False
gui_VRCFTModulePort: int = 8889
gui_VRCFTModuleIPAddress: str = "127.0.0.1"
gui_ShouldEmulateEyeWiden: bool = False
gui_ShouldEmulateEyeSquint: bool = False
gui_ShouldEmulateEyebrows: bool = False
gui_WidenThresholdV1_min: float = 0.60
gui_WidenThresholdV1_max: float = 1
gui_WidenThresholdV2_min: float = 0.60
gui_WidenThresholdV2_max: float = 1.05
gui_SqueezeThresholdV1_min: float = 0.07
gui_SqueezeThresholdV1_max: float = 0.5
gui_SqueezeThresholdV2_min: float = 0.07
gui_SqueezeThresholdV2_max: float = -1
gui_EyebrowThresholdRising: float = 0.8
gui_EyebrowThresholdLowering: float = 0.15
gui_OutputMultiplier: float = 1
gui_use_module: bool = False
class EyeTrackConfig(BaseModel):
@ -44,15 +224,112 @@ class EyeTrackConfig(BaseModel):
left_eye: EyeTrackCameraConfig = EyeTrackCameraConfig()
settings: EyeTrackSettingsConfig = EyeTrackSettingsConfig()
eye_display_id: EyeId = EyeId.RIGHT
__listeners = []
@staticmethod
def load():
if not os.path.exists(CONFIG_FILE_NAME):
print("No settings file, using base settings")
return EyeTrackConfig()
try:
with open(CONFIG_FILE_NAME, "r") as settings_file:
return EyeTrackConfig(**json.load(settings_file))
except json.JSONDecodeError:
print("[INFO] Failed to load settings file")
load_config = None
if os.path.exists(BACKUP_CONFIG_FILE_NAME):
try:
with open(BACKUP_CONFIG_FILE_NAME, "r") as settings_file:
load_config = EyeTrackConfig(**json.load(settings_file))
print("[INFO] Using backup settings")
except json.JSONDecodeError:
pass
if load_config is None:
print("[INFO] using base settings")
load_config = EyeTrackConfig()
return load_config
def validate_camera_address_conflict(self, eye_id, capture_source):
match eye_id:
case EyeId.RIGHT:
if self.left_eye.capture_source == capture_source:
print(
f"{Fore.YELLOW}[WARN] Capture source {capture_source} already in use by the left camera.{Fore.RESET}"
)
return False
case EyeId.LEFT:
if self.right_eye.capture_source == capture_source:
print(
f"{Fore.YELLOW}[WARN] Capture source {capture_source} already in use by the right camera.{Fore.RESET}"
)
return False
case _:
return False
return True
def update_eye_model_config(self, eye_id: EyeId, data: dict, should_save=True, should_notify=True) -> bool:
"""
A more granular method for updating a particular model so that everything that relies on it
will get notified about any changes. Note, it acts a bit like pub-sub,
we don't care what changes got passed, we will notify the listeners with them.
It's the listeners' job to check if they want that update.
"""
# The app really doesn't like address clashes, so we have to validate it as soon as possible
# otherwise we crash
if "capture_source" in data and not self.validate_camera_address_conflict(eye_id, data["capture_source"]):
return False
match eye_id:
case EyeId.RIGHT:
changed = self.right_eye.update(data)
case EyeId.LEFT:
changed = self.left_eye.update(data)
case _:
return False
if should_save:
self.save()
if should_notify:
self.__notify_listeners(data)
return changed
def update(self, data, save=False):
"""
More of an internal method for modules to be able to update the config
and have other parts of the system react to changes
"""
for field, value in data.items():
setattr(self.settings, field, value)
self.__notify_listeners(data)
if save:
self.save()
def save(self):
# make sure this is only called if there is a change
if os.path.exists(CONFIG_FILE_NAME):
try:
# Verify existing configuration files.
with open(CONFIG_FILE_NAME, "r") as settings_file:
EyeTrackConfig(**json.load(settings_file))
shutil.copy(CONFIG_FILE_NAME, BACKUP_CONFIG_FILE_NAME)
# print("Backed up settings files.") # Comment out because it's too loud.
except shutil.SameFileError:
pass
except json.JSONDecodeError:
# No backup because the saved settings file is broken.
pass
with open(CONFIG_FILE_NAME, "w") as settings_file:
json.dump(obj=self.model_dump(warnings=False), fp=settings_file)
print(f"\033[92m[INFO] Config Saved Successfully\033[0m")
def register_listener_callback(self, callback):
print(f"[DEBUG] Registering listener {callback}")
self.__listeners.append(callback)
def __notify_listeners(self, data: dict):
for listener in self.__listeners:
listener(data)
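The listener mechanism above is a minimal pub-sub: `update_eye_model_config` and `update` push the raw change dict to every registered callback, and each callback filters for the keys it cares about. A sketch of how a consumer might hook in (illustrative; `on_config_update` is a made-up handler, not part of this diff):

```python
config = EyeTrackConfig.load()

def on_config_update(data: dict):
    # listeners receive the raw change dict and decide whether they care
    if "gui_osc_address" in data:
        print("OSC address changed to", data["gui_osc_address"])

config.register_listener_callback(on_config_update)
config.update({"gui_osc_address": "127.0.0.1"}, save=False)  # notifies every listener
```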

356
EyeTrackApp/daddy.py Normal file
View File

@ -0,0 +1,356 @@
"""
------------------------------------------------------------------------------------------------------
,@@@@@@
@@@@@@@@@@@ @@@
@@@@@@@@@@@@ @@@@@@@@@@@
@@@@@@@@@@@@@ @@@@@@@@@@@@@@
@@@@@@@/ ,@@@@@@@@@@@@@
/@@@@@@@@@@@@@@@ @@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@ @@@@@
@@@@@@@@ @@@@@
,@@@ @@@@&
@@@@@@. @@@@
@@@ @@@@@@@@@/ @@@@@
,@@@. @@@@@@((@ @@@@(
//@@@ ,, @@@@ @@@@@
@@@( @@@@@@@
@@@ @ @@@@@@@@#
@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@(
DADDY By: PallasNeko Optimization
Algorithm App Implementations By: PallasNeko, Prohurtz
Copyright (c) 2025 EyeTrackVR <3
LICENSE: Babble Software Distribution License 1.0
------------------------------------------------------------------------------------------------------
"""
import sys
from typing import Tuple
import math
import platform
import numpy as np
import cv2
import onnxruntime
from one_euro_filter import OneEuroFilter
from utils.misc_utils import FastMedian, resource_path
import os
os.environ["OMP_NUM_THREADS"] = "1"
# DADDY
# Please change the name of this script and the name of the method if you have something better.
video_path = "ezgif.com-gif-maker.avi"
input_size = 192 # Do not change this number.
heatmap_size = 48 # Do not change this number.
kernel_size = 7
if platform.system() == "Darwin":
model_file = "Models/daddy230210.onnx" # The model file name will be changed when performance stabilises. # funny MacOS files issues :P
else:
model_file = "Models/daddy230210.onnx" # The model file name will be changed when performance stabilises.
# SHA256 for model version verification
# daddy230210.onnx = 59e59aa2a21024884200dd3acbd5e6a2e8d7209c46555fbdc727d4fe3adb68d3
imshow_enable = False
save_video = False
save_filepath = "output.mp4"
def get_max_preds(batch_heatmaps):
# base:https://github.com/ilovepose/DarkPose
batch_size = batch_heatmaps.shape[0]
num_joints = batch_heatmaps.shape[1]
width = batch_heatmaps.shape[3]
heatmaps_reshaped = batch_heatmaps.reshape((batch_size, num_joints, -1))
idx = np.argmax(heatmaps_reshaped, 2)
maxvals = np.amax(heatmaps_reshaped, 2)
maxvals = maxvals.reshape((batch_size, num_joints, 1))
idx = idx.reshape((batch_size, num_joints, 1))
preds = np.tile(idx, (1, 1, 2)).astype(np.float32)
preds[:, :, 0] = (preds[:, :, 0]) % width
preds[:, :, 1] = np.floor((preds[:, :, 1]) / width)
pred_mask = np.tile(np.greater(maxvals, 0.0), (1, 1, 2))
pred_mask = pred_mask.astype(np.float32)
preds *= pred_mask
return preds, maxvals
def taylor(hm, coord):
# base:https://github.com/ilovepose/DarkPose
heatmap_height = hm.shape[0]
heatmap_width = hm.shape[1]
px = int(coord[0])
py = int(coord[1])
if 1 < px < heatmap_width - 2 and 1 < py < heatmap_height - 2:
dx = 0.5 * (hm[py][px + 1] - hm[py][px - 1])
dy = 0.5 * (hm[py + 1][px] - hm[py - 1][px])
dxx = 0.25 * (hm[py][px + 2] - 2 * hm[py][px] + hm[py][px - 2])
dxy = 0.25 * (hm[py + 1][px + 1] - hm[py - 1][px + 1] - hm[py + 1][px - 1] + hm[py - 1][px - 1])
dyy = 0.25 * (hm[py + 2 * 1][px] - 2 * hm[py][px] + hm[py - 2 * 1][px])
derivative = np.matrix([[dx], [dy]])
hessian = np.matrix([[dxx, dxy], [dxy, dyy]])
if dxx * dyy - dxy**2 != 0:
hessianinv = hessian.I
offset = -hessianinv * derivative
offset = np.squeeze(np.array(offset.T), axis=0)
coord += offset
return coord
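# In DarkPose terms, the blurred log-space heatmap is treated as locally quadratic
# around the argmax, so the sub-pixel refinement above is one Newton step,
# offset = -inv(Hessian) @ gradient, with both estimated by the finite differences
# (dx, dy, dxx, dxy, dyy) computed above.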
def gaussian_blur(hm, kernel):
# base:https://github.com/ilovepose/DarkPose
border = (kernel - 1) // 2
batch_size = hm.shape[0]
num_joints = hm.shape[1]
height = hm.shape[2]
width = hm.shape[3]
for i in range(batch_size):
for j in range(num_joints):
origin_max = np.max(hm[i, j])
dr = np.zeros((height + 2 * border, width + 2 * border))
dr[border:-border, border:-border] = hm[i, j].copy()
dr = cv2.GaussianBlur(dr, (kernel, kernel), 0)
hm[i, j] = dr[border:-border, border:-border].copy()
hm[i, j] *= origin_max / np.max(hm[i, j])
return hm
def get_final_preds(hm, realsize):
# base:https://github.com/ilovepose/DarkPose
coords, maxvals = get_max_preds(hm)
# post-processing
hm = gaussian_blur(hm, kernel_size)
hm = np.maximum(hm, 1e-10)
hm = np.log(hm)
for n in range(coords.shape[0]):
for p in range(coords.shape[1]):
coords[n, p] = taylor(hm[n][p], coords[n][p])
preds = coords.copy()
preds = (preds / heatmap_size) * realsize # input_size
# Transform back
# for i in range(coords.shape[0]):
# preds[i] = transform_preds(
# coords[i], center[i], scale[i], [heatmap_width, heatmap_height]
# )
return preds, maxvals
def resize_with_pad(
image: np.array, new_shape: Tuple[int, int], padding_color: Tuple[int] = (255, 255, 255)
) -> np.array:
"""
https://gist.github.com/IdeaKing/11cf5e146d23c5bb219ba3508cca89ec
Maintains aspect ratio and resizes with padding.
Params:
image: Image to be resized.
new_shape: Expected (width, height) of new image.
padding_color: Tuple in BGR of padding color
Returns:
image: Resized image with padding
"""
original_shape = (image.shape[1], image.shape[0])
ratio = float(max(new_shape)) / max(original_shape)
new_size = tuple([int(x * ratio) for x in original_shape])
image = cv2.resize(image, new_size)
delta_w = new_shape[0] - new_size[0]
delta_h = new_shape[1] - new_size[1]
top, bottom = delta_h // 2, delta_h - (delta_h // 2)
left, right = delta_w // 2, delta_w - (delta_w // 2)
image = cv2.copyMakeBorder(image, top, bottom, left, right, cv2.BORDER_CONSTANT, value=padding_color)
return image
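# Worked example: a 640x480 frame with new_shape=(192, 192) gives ratio 0.3, an
# intermediate resize to 192x144, and 24 px of padding_color on top and bottom.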
# Better Eye aspEct Ratio
class BEER(object):
def __init__(self):
self.ear_th = 0.2
self.ear_min = 0.05
self.ear_max = 0.2
self.p03_med = FastMedian(k=256)
self.prev_ear = 0.5
# todo https://peerj.com/articles/cs-943/
def ear(self, pred):
p15 = np.linalg.norm(pred[1] - pred[5])
p24 = np.linalg.norm(pred[2] - pred[4])
p03 = np.linalg.norm(pred[0] - pred[3])
self.p03_med + p03  # FastMedian overloads "+": this inserts a new sample into the running median
if p03 > self.p03_med.median() * 1.5:
return self.prev_ear
ear = (p15 + p24) / (2 * self.p03_med.median())
self.ear_minmax(ear)
norm_ear = self.ear_norm(ear)
self.prev_ear = norm_ear.copy()
return norm_ear
def ear_minmax(self, ear):
if ear < self.ear_min:
self.ear_min = ear.copy()
if ear > self.ear_max:
self.ear_max = ear.copy()
def ear_norm(self, ear):
return (ear - self.ear_min) / (
self.ear_max - self.ear_min
) # todo: add a small epsilon to avoid division by zero.
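# This is the classic eye aspect ratio, EAR = (|p1 - p5| + |p2 - p4|) / (2 * |p0 - p3|),
# except the horizontal span |p0 - p3| comes from a running median (FastMedian), so a
# single bad landmark frame cannot spike the denominator; ear_minmax/ear_norm then
# rescale the raw ratio into a 0..1 openness estimate.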
#
# loopnum = 0
#
# Deep leArning lanDmark Detection for eYes
class DADDY_cls(object):
def __init__(self):
onnxruntime.disable_telemetry_events()
options = onnxruntime.SessionOptions()
options.inter_op_num_threads = 1 # This number should be changed accordingly
options.intra_op_num_threads = 1 # This number should be changed accordingly
options.execution_mode = onnxruntime.ExecutionMode.ORT_SEQUENTIAL
options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
ort_session = onnxruntime.InferenceSession(
resource_path(model_file), sess_options=options, providers=["CPUExecutionProvider"]
)
ort_session.set_providers(["CPUExecutionProvider"]) # only cpu mode
self.ort_session = ort_session
self.input_name = ort_session.get_inputs()[0].name
self.output_name = ort_session.get_outputs()[0].name
min_cutoff = 0.0004
beta = 0.9
input_point = np.zeros((11, 2)) # np.array([1, 1])
self.one_euro_filter = OneEuroFilter(input_point, min_cutoff=min_cutoff, beta=beta)
# self.ear_oef = OneEuroFilter(
# np.zeros(1),
# min_cutoff=min_cutoff,
# beta=beta
# ) # memo: Parameters need tuning
self.beer = BEER()
# filepath = 'test.mp4'
# codec = cv2.VideoWriter_fourcc(*"mp4v")
# video = cv2.VideoWriter(filepath, codec, 60.0, (200, 150), 0) # (60, 60)) # (150, 200))
# self.video = video
def open_video(self, video_path):
# Temporary implementation to run
cap = cv2.VideoCapture(video_path)
if not cap.isOpened():
raise IOError("Error opening video stream or file")
self.cap = cap
return True
def read_frame(self):
# Temporary implementation to run
if not self.cap.isOpened():
return False
ret, frame = self.cap.read()
if ret:
# I have set it to grayscale (1ch) just in case, but if the frame is 1ch, this line can be commented out.
# self.current_image=frame # debug code
self.current_image_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
return True
return False
def single_run(self):
# Temporary implementation to run
# todo: If it's the left hand eye, flip the image left to right.
gray_frame = self.current_image_gray.copy()
# frame_resize=resize_with_pad(gray_frame,(input_size,input_size))
# or
frame_resize = cv2.resize(gray_frame, (input_size, input_size))
imgs = np.divide(frame_resize[np.newaxis, np.newaxis], 255, dtype=np.float32) # input/255.0
pred_heatmap = self.ort_session.run(None, {self.input_name: imgs})[0] # .reshape((-1, 2))
# if imshow_enable:
# heatmap = pred_heatmap.reshape((-1, heatmap_size, heatmap_size))
# for i in range(heatmap.shape[0]):
# cv2.imshow("heatmap_{}".format(i + 1), heatmap[i])
pred, max_val = get_final_preds(
pred_heatmap, (self.current_image_gray.shape[1], self.current_image_gray.shape[0])
)
pred = pred.reshape((-1, 2))
# or
# pred, max_val = get_final_preds(pred_heatmap, input_size)
# pred = pred.reshape((-1, 2))
# height, width = self.current_image_gray.shape[:2]
# scale_x = input_size/ width
# scale_y = input_size / height
# pred[:, 0] *= scale_x
# pred[:, 1] *= scale_y
pred = self.one_euro_filter(pred)
kps = pred.astype(np.int32)
# eyecenter = kps[:6].mean(axis=0).astype(int)
ear = self.beer.ear(pred)
# ear=self.ear_oef(ear[np.newaxis])#memo: Parameters need tuning
pupil_center = pred[7:].mean(axis=0)
pupil_center_x = int(pupil_center[0])
pupil_center_y = int(pupil_center[1])
for i in range(kps.shape[0]):
if i < 6:
color = (0, 0, 255)
elif i == 6:
color = 128
else:
color = (255, 0, 0)
# todo: We should have a proper variable for drawing.
cv2.circle(self.current_image_gray, (kps[i, 0], kps[i, 1]), 1, color, 2)
# cv2.putText(self.current_image_gray, str(i), (kps[i, 0] - 10, kps[i, 1] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
# cv2.putText(self.current_image_gray, "EAR: "+str(ear), (self.current_image_gray.shape[1]//10, self.current_image_gray.shape[0]//10), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255,0,0), 1)
# global loopnum
# if loopnum < 1350*2:
# # self.video.write(cv2.resize(gray_frame.copy(), (200, 150), None))
# loopnum += 1
# else:
# # self.video.release()
# cv2.destroyAllWindows()
# sys.exit()
# if w_video:
# video.release()
# kps[i, :] = (x, y)
# i == [0:6] = Inner and outer corners of eyes and eyelids
# i == [6] = pupil
# i == [7:] = iris
return pupil_center_x, pupil_center_y, ear
class External_Run_DADDY(object):
def __init__(self):
self.algo = DADDY_cls()
def run(self, current_image_gray):
self.algo.current_image_gray = current_image_gray
pupil_x, pupil_y, ear = self.algo.single_run()
return pupil_x, pupil_y, ear
if __name__ == "__main__":
daddy = DADDY_cls()
daddy.open_video(video_path)
while daddy.read_frame():
_ = daddy.single_run()

View File

@ -0,0 +1,327 @@
"""
------------------------------------------------------------------------------------------------------
,@@@@@@
@@@@@@@@@@@ @@@
@@@@@@@@@@@@ @@@@@@@@@@@
@@@@@@@@@@@@@ @@@@@@@@@@@@@@
@@@@@@@/ ,@@@@@@@@@@@@@
/@@@@@@@@@@@@@@@ @@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@ @@@@@
@@@@@@@@ @@@@@
,@@@ @@@@&
@@@@@@. @@@@
@@@ @@@@@@@@@/ @@@@@
,@@@. @@@@@@((@ @@@@(
//@@@ ,, @@@@ @@@@@
@@@( @@@@@@@
@@@ @ @@@@@@@@#
@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@(
Ellipse Based Pupil Dilation By: Prohurtz, PallasNeko (Optimization)
Algorithm App Implementations By: Prohurtz
Copyright (c) 2023 EyeTrackVR <3
LICENSE: Babble Software Distribution License 1.0
------------------------------------------------------------------------------------------------------
"""
import numpy
import numpy as np
import time
import os
import cv2
from eye import EyeId
from one_euro_filter import OneEuroFilter
os.environ["OMP_NUM_THREADS"] = "1"
# Note.
# OpenCV on Windows will generate an error if the file path contains non-ASCII characters when using cv2.imread(), cv2.imwrite(), etc.
# https://stackoverflow.com/questions/43185605/how-do-i-read-an-image-from-a-path-with-unicode-characters
# https://github.com/opencv/opencv/issues/18305
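# A common workaround (sketch only, not used in this file): decode and encode in
# memory so OpenCV never touches the non-ASCII path; numpy handles unicode paths fine.
#
# def imread_unicode(path, flags=cv2.IMREAD_UNCHANGED):
#     return cv2.imdecode(np.fromfile(path, dtype=np.uint8), flags)
#
# def imwrite_unicode(path, img, ext=".png"):
#     ok, buf = cv2.imencode(ext, img)
#     if ok:
#         buf.tofile(path)
#     return ok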
def csv2data(frameshape, filepath):
# For data checking
frameshape = (frameshape[0], frameshape[1] + 1)
out = np.zeros(frameshape, dtype=np.uint32)
xy_list = []
val_list = []
with open(filepath, mode="r", encoding="utf-8") as in_f:
# Skip header.
_ = in_f.readline()
for s in in_f:
xyval = [int(val) for val in s.strip().split(",")]
xy_list.append((xyval[0], xyval[1]))
val_list.append(xyval[2])
xy_list = np.array(xy_list)
val_list = np.array(val_list)
out[xy_list[:, 1], xy_list[:, 0]] = val_list[:]
return out
def data2csv(data_u32, filepath):
# For data checking
nonzero_index = np.nonzero(data_u32) # (row,col)
data_list = data_u32[nonzero_index].tolist()
datalines = ["{},{},{}\n".format(x, y, val) for y, x, val in zip(*nonzero_index, data_list)]
with open(filepath, "w", encoding="utf-8") as out_f:
out_f.write("x,y,eyedilation\n")
out_f.writelines(datalines)
return
def u32_1ch_to_u16_3ch(img):
out = np.zeros((*img.shape[:2], 3), dtype=np.uint16)
# https://github.com/numpy/numpy/issues/2524
# https://stackoverflow.com/questions/52782511/why-is-numpy-slower-than-python-for-left-bit-shifts
out[:, :, 0] = img & np.uint32(65535)
out[:, :, 1] = (img >> np.uint32(16)) & np.uint32(65535)
return out
def u16_3ch_to_u32_1ch(img):
# The image format with the most bits that can be displayed on Windows without additional software and that opencv can handle is PNG's uint16
out = img[:, :, 0].astype(np.float64) # float64 = max 2^53
cv2.add(out, img[:, :, 1].astype(np.float64) * np.float64(65536), dst=out) # opencv did not have uint32 type
return out.astype(np.uint32) # cast
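# Round-trip property: any v < 2**32 split into low/high uint16 halves survives
# u16_3ch_to_u32_1ch(u32_1ch_to_u16_3ch(v)) unchanged, e.g.
#
# v = np.array([[123456789]], dtype=np.uint32)
# assert np.array_equal(u16_3ch_to_u32_1ch(u32_1ch_to_u16_3ch(v)), v)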
def newdata(frameshape):
print("\033[94m[INFO] Initialise data for dilation.\033[0m")
return np.zeros(frameshape, dtype=np.uint32)
# EBPD
class EllipseBasedPupilDilation:
def __init__(self, eye_id):
# todo: It is necessary to consider whether the filename can be changed in the configuration file, etc.
if eye_id in [EyeId.LEFT]:
self.imgfile = "EBPD_LEFT.png"
if eye_id in [EyeId.RIGHT]:
self.imgfile = "EBPD_RIGHT.png"
# self.data[0, -1] = maxval, [1, -1] = rotation, [2, -1] = x, [3, -1] = y
self.data = None
self.lct = None
self.maxval = 0
# self.img_roi = self.now_roi == {"rotation": 0, "x": 0, "y": 0}
self.img_roi = np.zeros(3, dtype=np.int32)
self.now_roi = np.zeros(3, dtype=np.int32)
self.prev_val = 0.5
self.avg_dilation = 0.0
self.old = []
self.color = []
self.x = []
self.fc = 0
self.filterlist = []
self.averageList = []
self.openlist = []
self.eye_id = eye_id
self.maxinten = 0
self.tri_filter = []
min_cutoff = 0.00001
beta = 0.05
noisy_point = np.array([1, 1])
self.one_euro_filter = OneEuroFilter(noisy_point, min_cutoff=min_cutoff, beta=beta)
def check(self, frameshape):
# 0 in data is used as the initial value.
# When assigning a value, +1 is added to the value to be assigned.
self.load(frameshape)
# self.maxval = self.data[0, -1]
if self.lct is None:
self.lct = time.time()
def load(self, frameshape):
req_newdata = False
# Not very clever, but increase the width by 1px to save the maximum value.
frameshape = (frameshape[0], frameshape[1] + 1)
if self.data is None:
print(f"\033[92m[INFO] Loaded data for pupil dilation: {self.imgfile}\033[0m")
if os.path.isfile(self.imgfile):
try:
img = cv2.imread(self.imgfile, flags=cv2.IMREAD_UNCHANGED)
# check code: cv2.absdiff(img,u32_1ch_to_u16_3ch(u16_3ch_to_u32_1ch(img)))
if img.shape[:2] != frameshape:
print("[WARN] Size does not match the input frame.")
req_newdata = True
else:
self.data = u16_3ch_to_u32_1ch(img)
self.img_roi[:] = self.data[1:4, -1]
if not np.array_equal(self.img_roi, self.now_roi):
# If the ROI recorded in the image file differs from the current ROI
req_newdata = True
else:
self.maxval = self.data[0, -1]
except:
print("[ERROR] File read error: {}".format(self.imgfile))
req_newdata = True
else:
print("\033[94m[INFO] File does not exist.\033[0m")
req_newdata = True
else:
if self.data.shape != frameshape or not np.array_equal(self.img_roi, self.now_roi):
# If the ROI recorded in the image file differs from the current ROI
# todo: Using the previous and current frame sizes and centre positions from the original, etc., the data can be ported to some extent, but there may be many areas where code changes are required.
print("[INFO] \033[94mFrame size changed.\033[0m")
req_newdata = True
if req_newdata:
self.data = newdata(frameshape)
self.maxval = 0
self.img_roi = self.now_roi.copy()
# data2csv(self.data, "a.csv")
# csv2data(frameshape,"a.csv")
def save(self):
self.data[0, -1] = self.maxval
self.data[1:4, -1] = self.now_roi
cv2.imwrite(self.imgfile, u32_1ch_to_u16_3ch(self.data))
# print("SAVED: {}".format(self.imgfile))
def change_roi(self, roiinfo: dict):
self.now_roi[:] = [v for v in roiinfo.values()]
def clear_filter(self):
self.data = None
self.filterlist.clear()
self.averageList.clear()
if os.path.exists(self.imgfile):
os.remove(self.imgfile)
def intense(self, w, h, x, y, frame, filterSamples, outputSamples):
# x,y = 0~(frame.shape[1 or 0]-1), frame = 1-channel frame cropped by ROI
self.check(frame.shape)
int_x, int_y = int(x), int(y)
if int_x < 0 or int_y < 0:
return self.prev_val
upper_x = min(
int_x + 25, frame.shape[1] - 1
) # TODO make this a setting NEEDS TO BE BASED ON HSF RADIUS if possible
lower_x = max(int_x - 25, 0)
upper_y = min(int_y + 25, frame.shape[0] - 1)
lower_y = max(int_y - 25, 0)
# The same can be done with cv2.integral, but since there is only one area of the rectangle for which we want to know the total value, there is no advantage in terms of computational complexity.
pupil_area = numpy.pi * (w / 2) * (h / 2)
if len(self.filterlist) < filterSamples:
self.filterlist.append(pupil_area)
else:
self.filterlist.pop(0)
self.filterlist.append(pupil_area)
try:
if pupil_area >= np.percentile(self.filterlist, 99): # filter abnormally high values
# print('filter, assume blink')
pupil_area = self.maxval
except:
pass
newval_flg = False
oob = False
if int_x >= frame.shape[1]:
int_x = frame.shape[1] - 1
oob = True
if int_x < 0:
int_x = 1 # clamp back into frame bounds
oob = True
if int_y >= frame.shape[0]:
int_y = frame.shape[0] - 1
oob = True
if int_y < 0:
int_y = 1
oob = True
if oob != True and self.data.any():
data_val = self.data[int_y, int_x]
else:
data_val = 0
# max pupil per cord
if data_val == 0:
# The value of the specified coordinates has not yet been recorded.
self.data[int_y, int_x] = pupil_area
changed = True
newval_flg = True
else:
if pupil_area < data_val: # if current intensity value is less (more pupil), save that
self.data[int_y, int_x] = pupil_area # set value
changed = True
else:
pupil_areaa = max(
data_val + 5000, 1
) # if the current value is not smaller, nudge the stored value up; an aggressive adjustment, needs testing
self.data[int_y, int_x] = pupil_areaa # set value
changed = True
# min pupil global
if self.maxval == 0: # that value is not yet saved
self.maxval = pupil_area # set value at 0 index
else:
if pupil_area > self.maxval: # if current intensity value is more (less pupil), save that NOTE: we have the
self.maxval = pupil_area - 5 # set value at 0 index
else:
pupil_aread = max(
(self.maxval - 5), 1
) # continuously adjust closed intensity, will be set when user blink, used to allow eyes to close when lighting changes
self.maxval = pupil_aread # set value at 0 index
if newval_flg:
# Do the same thing as in the original version.
eyedilation = self.prev_val # 0.9
else:
maxp = float(self.data[int_y, int_x])
minp = float(self.maxval)
try:
if not np.isfinite(pupil_area) or not np.isfinite(maxp) or not np.isfinite(minp) or (minp - maxp) == 0:
eyedilation = 0.5
else:
eyedilation = (pupil_area - maxp) / (minp - maxp)
except:
eyedilation = 0.5
eyedilation = 1 - eyedilation
if outputSamples > 0:
if len(self.averageList) < outputSamples:
self.averageList.append(eyedilation)
else:
self.averageList.pop(0)
self.averageList.append(eyedilation)
eyedilation = np.average(self.averageList)
if eyedilation > 1: # clamp values
eyedilation = 1.0
if eyedilation < 0:
eyedilation = 0.0
if changed and ((time.time() - self.lct) > 15): # save at most every 15 seconds if something changed, to limit disk usage
self.save()
self.lct = time.time()
self.prev_val = eyedilation
try:
noisy_point = np.array([float(eyedilation), float(eyedilation)]) # filter our values with a One Euro Filter
point_hat = self.one_euro_filter(noisy_point)
eyedilationx = point_hat[0]
eyedilationy = point_hat[1]
eyedilation = (eyedilationx + eyedilationy) / 2
except:
pass
return eyedilation
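A hypothetical call site for the class above (names and numbers are illustrative; the real caller lives in the tracking pipeline):

```python
import numpy as np
from eye import EyeId

ebpd = EllipseBasedPupilDilation(EyeId.RIGHT)
ebpd.change_roi({"rotation": 0, "x": 0, "y": 0})
gray_roi = np.zeros((60, 64), dtype=np.uint8)  # stand-in for a cropped 1ch eye frame
# w, h: fitted ellipse axes in px; x, y: pupil centre within the ROI
dilation = ebpd.intense(24, 20, 32, 30, gray_roi, filterSamples=400, outputSamples=0)
```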

212
EyeTrackApp/enums.py Normal file
View File

@ -0,0 +1,212 @@
"""
------------------------------------------------------------------------------------------------------
,@@@@@@
@@@@@@@@@@@ @@@
@@@@@@@@@@@@ @@@@@@@@@@@
@@@@@@@@@@@@@ @@@@@@@@@@@@@@
@@@@@@@/ ,@@@@@@@@@@@@@
/@@@@@@@@@@@@@@@ @@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@ @@@@@
@@@@@@@@ @@@@@
,@@@ @@@@&
@@@@@@. @@@@
@@@ @@@@@@@@@/ @@@@@
,@@@. @@@@@@((@ @@@@(
//@@@ ,, @@@@ @@@@@
@@@( @@@@@@@
@@@ @ @@@@@@@@#
@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@(
Copyright (c) 2025 EyeTrackVR <3
LICENSE: Babble Software Distribution License 1.0
------------------------------------------------------------------------------------------------------
"""
from __future__ import annotations
import types
from collections import namedtuple
from typing import (
Any,
ClassVar,
Dict,
List,
TYPE_CHECKING,
Tuple,
Type,
TypeVar,
Iterator,
Mapping,
)
__all__ = (
"Enum",
"EyeLR",
)
if TYPE_CHECKING:
from typing_extensions import Self
def _create_value_cls(name: str, comparable: bool):
# All the type ignores here are due to the type checker being unable to recognise
# Runtime type creation without exploding.
cls = namedtuple("_EnumValue_" + name, "name value")
cls.__repr__ = lambda self: f"<{name}.{self.name}: {self.value!r}>" # type: ignore
cls.__str__ = lambda self: f"{name}.{self.name}" # type: ignore
if comparable:
cls.__le__ = lambda self, other: isinstance(other, self.__class__) and self.value <= other.value # type: ignore
cls.__ge__ = lambda self, other: isinstance(other, self.__class__) and self.value >= other.value # type: ignore
cls.__lt__ = lambda self, other: isinstance(other, self.__class__) and self.value < other.value # type: ignore
cls.__gt__ = lambda self, other: isinstance(other, self.__class__) and self.value > other.value # type: ignore
return cls
def _is_descriptor(obj):
return (
hasattr(obj, "__get__") or hasattr(obj, "__set__") or hasattr(obj, "__delete__")
)
class EnumMeta(type):
if TYPE_CHECKING:
__name__: ClassVar[str]
_enum_member_names_: ClassVar[List[str]]
_enum_member_map_: ClassVar[Dict[str, Any]]
_enum_value_map_: ClassVar[Dict[Any, Any]]
def __new__(
cls,
name: str,
bases: Tuple[type, ...],
attrs: Dict[str, Any],
*,
comparable: bool = False,
):
value_mapping = {}
member_mapping = {}
member_names = []
value_cls = _create_value_cls(name, comparable)
for key, value in list(attrs.items()):
is_descriptor = _is_descriptor(value)
if key[0] == "_" and not is_descriptor:
continue
# Special case classmethod to just pass through
if isinstance(value, classmethod):
continue
if is_descriptor:
setattr(value_cls, key, value)
del attrs[key]
continue
try:
new_value = value_mapping[value]
except KeyError:
new_value = value_cls(name=key, value=value)
value_mapping[value] = new_value
member_names.append(key)
member_mapping[key] = new_value
attrs[key] = new_value
attrs["_enum_value_map_"] = value_mapping
attrs["_enum_member_map_"] = member_mapping
attrs["_enum_member_names_"] = member_names
attrs["_enum_value_cls_"] = value_cls
actual_cls = super().__new__(cls, name, bases, attrs)
value_cls._actual_enum_cls_ = actual_cls # type: ignore # Runtime attribute isn't understood
return actual_cls
def __iter__(cls) -> Iterator[Any]:
return (cls._enum_member_map_[name] for name in cls._enum_member_names_)
def __reversed__(cls) -> Iterator[Any]:
return (
cls._enum_member_map_[name] for name in reversed(cls._enum_member_names_)
)
def __len__(cls) -> int:
return len(cls._enum_member_names_)
def __repr__(cls) -> str:
return f"<enum {cls.__name__}>"
@property
def __members__(cls) -> Mapping[str, Any]:
return types.MappingProxyType(cls._enum_member_map_)
def __call__(cls, value: str) -> Any:
try:
return cls._enum_value_map_[value]
except (KeyError, TypeError):
raise ValueError(f"{value!r} is not a valid {cls.__name__}")
def __getitem__(cls, key: str) -> Any:
return cls._enum_member_map_[key]
def __setattr__(cls, name: str, value: Any) -> None:
raise TypeError("Enums are immutable.")
def __delattr__(cls, attr: str) -> None:
raise TypeError("Enums are immutable")
def __instancecheck__(self, instance: Any) -> bool:
# isinstance(x, Y)
# -> __instancecheck__(Y, x)
try:
return instance._actual_enum_cls_ is self
except AttributeError:
return False
if TYPE_CHECKING:
from enum import Enum, IntEnum
else:
class Enum(metaclass=EnumMeta):
@classmethod
def try_value(cls, value):
try:
return cls._enum_value_map_[value]
except (KeyError, TypeError):
return value
E = TypeVar("E", bound="Enum")
def create_unknown_value(cls: Type[E], val: Any) -> E:
value_cls = cls._enum_value_cls_ # type: ignore # This is narrowed below
name = f"unknown_{val}"
return value_cls(name=name, value=val)
def try_enum(cls: Type[E], val: Any) -> E:
"""A function that tries to turn the value into enum ``cls``.
If it fails it returns a proxy invalid value instead.
"""
try:
return cls._enum_value_map_[val] # type: ignore # All errors are caught below
except (KeyError, TypeError, AttributeError):
return create_unknown_value(cls, val)
# The line above is based on the code in the following url
# https://github.com/Rapptz/discord.py/blob/f7e97954950ffb0e34238d70813454caa6f1a3ae/discord/enums.py
class EyeLR(Enum):
LEFT = 1
RIGHT = 2
def __str__(self) -> str:
return self.name
def __int__(self) -> int:
return self.value
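A quick sanity sketch of the machinery above (illustrative only):

```python
assert EyeLR(1) is EyeLR.LEFT        # value lookup via EnumMeta.__call__
assert str(EyeLR.RIGHT) == "RIGHT"   # the __str__ descriptor moves onto the value class
assert int(EyeLR.LEFT) == 1
print(try_enum(EyeLR, 3))            # unknown values come back as a proxy named unknown_3
```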

58
EyeTrackApp/eye.py Normal file
View File

@ -0,0 +1,58 @@
"""
------------------------------------------------------------------------------------------------------
,@@@@@@
@@@@@@@@@@@ @@@
@@@@@@@@@@@@ @@@@@@@@@@@
@@@@@@@@@@@@@ @@@@@@@@@@@@@@
@@@@@@@/ ,@@@@@@@@@@@@@
/@@@@@@@@@@@@@@@ @@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@ @@@@@
@@@@@@@@ @@@@@
,@@@ @@@@&
@@@@@@. @@@@
@@@ @@@@@@@@@/ @@@@@
,@@@. @@@@@@((@ @@@@(
//@@@ ,, @@@@ @@@@@
@@@( @@@@@@@
@@@ @ @@@@@@@@#
@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@(
Copyright (c) 2025 EyeTrackVR <3
LICENSE: Babble Software Distribution License 1.0
------------------------------------------------------------------------------------------------------
"""
from dataclasses import dataclass
from enum import Enum, IntEnum
class EyeId(IntEnum):
RIGHT = 0
LEFT = 1
BOTH = 2
SETTINGS = 3
ALGOSETTINGS = 4
VRCFTMODULESETTINGS = 5
GUIOFF = 6
class EyeInfoOrigin(Enum):
RANSAC = 1
BLOB = 2
FAILURE = 3
HSF = 4
HSRAC = 5
DADDY = 6
LEAP = 7
@dataclass
class EyeInfo:
info_type: EyeInfoOrigin
x: float
y: float
pupil_dilation: float
blink: float
avg_velocity: float

File diff suppressed because it is too large

View File

@ -1,94 +1,269 @@
"""
------------------------------------------------------------------------------------------------------
,@@@@@@
@@@@@@@@@@@ @@@
@@@@@@@@@@@@ @@@@@@@@@@@
@@@@@@@@@@@@@ @@@@@@@@@@@@@@
@@@@@@@/ ,@@@@@@@@@@@@@
/@@@@@@@@@@@@@@@ @@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@ @@@@@
@@@@@@@@ @@@@@
,@@@ @@@@&
@@@@@@. @@@@
@@@ @@@@@@@@@/ @@@@@
,@@@. @@@@@@((@ @@@@(
//@@@ ,, @@@@ @@@@@
@@@( @@@@@@@
@@@ @ @@@@@@@@#
@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@(
Copyright (c) 2025 EyeTrackVR <3
LICENSE: Babble Software Distribution License 1.0
------------------------------------------------------------------------------------------------------
"""
import os
import PySimpleGUI as sg
import queue
import requests
import threading
from camera_widget import CameraWidget
from config import EyeTrackConfig
from eye import EyeId
from settings.VRCFTModuleSettings import VRCFTSettingsWidget
from settings.general_settings_widget import SettingsWidget
from settings.algo_settings_widget import AlgoSettingsWidget
from osc.osc import OSCManager
from osc.OSCMessage import OSCMessage
from utils.misc_utils import is_nt, resource_path
import cv2
import numpy as np
import uuid

winmm = None
if is_nt:
from winotify import Notification
from ctypes import windll, c_int
try:
winmm = windll.winmm
except OSError:
print("\033[91m[WARN] Failed to load winmm.dll\033[0m")
os.system("color") # init ANSI color

# Random environment variable to speed up webcam opening on the MSMF backend.
# https://github.com/opencv/opencv/issues/17687
os.environ["OPENCV_VIDEOIO_MSMF_ENABLE_HW_TRANSFORMS"] = "0"

WINDOW_NAME = "EyeTrackApp"
page_url = "https://github.com/EyeTrackVR/EyeTrackVR/releases/latest"
appversion = "EyeTrackApp 0.2.0"
class KeyManager:
def __init__(self):
self.update_keys()
def update_keys(self):
unique_id = str(uuid.uuid4())
self.RIGHT_EYE_NAME = f"-RIGHTEYEWIDGET{unique_id}-"
self.LEFT_EYE_NAME = f"-LEFTEYEWIDGET{unique_id}-"
self.SETTINGS_NAME = f"-SETTINGSWIDGET{unique_id}-"
self.ALGO_SETTINGS_NAME = f"-ALGOSETTINGSWIDGET{unique_id}-"
self.VRCFT_MODULE_SETTINGS_NAME = f"-VRCFTSETTINGSWIDGET{unique_id}-"
self.LEFT_EYE_RADIO_NAME = f"-LEFTEYERADIO{unique_id}-"
self.RIGHT_EYE_RADIO_NAME = f"-RIGHTEYERADIO{unique_id}-"
self.BOTH_EYE_RADIO_NAME = f"-BOTHEYERADIO{unique_id}-"
self.SETTINGS_RADIO_NAME = f"-SETTINGSRADIO{unique_id}-"
self.ALGO_SETTINGS_RADIO_NAME = f"-ALGOSETTINGSRADIO{unique_id}-"
self.VRCFT_MODULE_SETTINGS_RADIO_NAME = f"-VRCFTSETTINGSRADIO{unique_id}-"
self.GUIOFF_RADIO_NAME = f"-GUIOFF{unique_id}-"
# Create an instance of the KeyManager
key_manager = KeyManager()
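# Fresh uuid-suffixed keys on every create_window() call: PySimpleGUI layouts and
# their elements cannot be reused once a window is closed, so each rebuild (e.g.
# after toggling GUI OFF) gets a brand-new set of element keys.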
def create_window(config, settings, eyes):
key_manager.update_keys()
for eye in eyes:
eye.update_layouts()
layout = [
[
sg.Radio(
"Left Eye",
"EYESELECTRADIO",
background_color="#292929",
default=(config.eye_display_id == EyeId.LEFT),
key=key_manager.LEFT_EYE_RADIO_NAME,
),
sg.Radio(
"Right Eye",
"EYESELECTRADIO",
background_color="#292929",
default=(config.eye_display_id == EyeId.RIGHT),
key=key_manager.RIGHT_EYE_RADIO_NAME,
),
sg.Radio(
"Both Eyes",
"EYESELECTRADIO",
background_color="#292929",
default=(config.eye_display_id == EyeId.BOTH),
key=key_manager.BOTH_EYE_RADIO_NAME,
),
sg.Radio(
"Settings",
"EYESELECTRADIO",
background_color="#292929",
default=(config.eye_display_id == EyeId.SETTINGS),
key=key_manager.SETTINGS_RADIO_NAME,
),
sg.Radio(
"Algo Settings",
"EYESELECTRADIO",
background_color="#292929",
default=(config.eye_display_id == EyeId.ALGOSETTINGS),
key=key_manager.ALGO_SETTINGS_RADIO_NAME,
),
],
[
sg.Radio(
"VRCFT Module Settings",
"EYESELECTRADIO",
background_color="#292929",
default=(config.eye_display_id == EyeId.VRCFTMODULESETTINGS),
key=key_manager.VRCFT_MODULE_SETTINGS_RADIO_NAME,
),
],
[
sg.Column(
eyes[1].widget_layout,
vertical_alignment="top",
key=key_manager.LEFT_EYE_NAME,
visible=(config.eye_display_id in [EyeId.LEFT, EyeId.BOTH]),
background_color="#424042",
),
sg.Column(
eyes[0].widget_layout,
vertical_alignment="top",
key=key_manager.RIGHT_EYE_NAME,
visible=(config.eye_display_id in [EyeId.RIGHT, EyeId.BOTH]),
background_color="#424042",
),
sg.Column(
settings[0].get_layout(),
vertical_alignment="top",
key=key_manager.SETTINGS_NAME,
visible=(config.eye_display_id in [EyeId.SETTINGS]),
background_color="#424042",
),
sg.Column(
settings[1].get_layout(),
vertical_alignment="top",
key=key_manager.ALGO_SETTINGS_NAME,
visible=(config.eye_display_id in [EyeId.ALGOSETTINGS]),
background_color="#424042",
),
sg.Column(
settings[2].get_layout(),
vertical_alignment="top",
key=key_manager.VRCFT_MODULE_SETTINGS_NAME,
visible=(config.eye_display_id in [EyeId.VRCFTMODULESETTINGS]),
background_color="#424042",
),
],
[
sg.Button(
"GUI OFF",
key=key_manager.GUIOFF_RADIO_NAME,
button_color="#6f4ca1",
),
],
# Keep at bottom!
[sg.Text("- - - Interface Paused - - -", key="-WINFOCUS-", background_color="#292929", text_color="#F0F0F0", justification="center", expand_x=True, visible=False)],
]
if config.eye_display_id in [EyeId.LEFT, EyeId.BOTH]:
eyes[1].start()
if config.eye_display_id in [EyeId.RIGHT, EyeId.BOTH]:
eyes[0].start()
if config.eye_display_id in [EyeId.SETTINGS]:
settings[0].start()
if config.eye_display_id in [EyeId.ALGOSETTINGS]:
settings[1].start()
if config.eye_display_id in [EyeId.VRCFTMODULESETTINGS]:
settings[2].start()
# the eyes need to be running before they are passed to the OSC
# Create the window
return sg.Window(
f"{appversion}",
layout,
icon=resource_path("Images/logo.ico"),
background_color="#292929")
def timerResolution(toggle):
if winmm != None:
if toggle:
rc = c_int(winmm.timeBeginPeriod(1))
if rc.value != 0:
# TIMEERR_NOCANDO = 97
print(f"\033[93m[WARN] Failed to set timer resolution: {rc.value}\033[0m")
else:
winmm.timeEndPeriod(1)
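# timeBeginPeriod(1) asks Windows for 1 ms timer granularity, which makes the GUI's
# short read() timeouts honest; every call must be paired with timeEndPeriod, hence
# timerResolution(True) at startup and timerResolution(False) on exit.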
def main():
# Get Configuration
config: EyeTrackConfig = EyeTrackConfig.load()
config.save()

cancellation_event = threading.Event()
# Check to see if we can connect to our video source first. If not, bring up camera finding
# dialog.
try:
if config.settings.gui_update_check:
response = requests.get("https://api.github.com/repos/EyeTrackVR/EyeTrackVR/releases/latest")
latestversion = response.json()["name"]
appversion = "0.1.7.2" if (
url = "https://raw.githubusercontent.com/RedHawk989/EyeTrackVR-Installer/master/Version-Data/Version_Num.txt" appversion == latestversion
html = urlopen(url).read() ): # If what we scraped and hardcoded versions are same, assume we are up to date.
soup = BeautifulSoup(html, features="html.parser") print(f"\033[92m[INFO] App is the latest version! [{latestversion}]\033[0m")
for script in soup(["script", "style"]): else:
script.extract() print(
text = soup.get_text() f"\033[93m[INFO] You have app version [{appversion}] installed. Please update to [{latestversion}] for the newest features.\033[0m"
# break into lines and remove leading and trailing space on each
lines = (line.strip() for line in text.splitlines())
# break multi-headlines into a line each
chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
# drop blank lines
latestversion = '\n'.join(chunk for chunk in chunks if chunk)
if appversion == latestversion: # If what we scraped and hardcoded versions are same, assume we are up to date.
print(f"[INFO] App is up to date! {latestversion}")
else:
print(f"[INFO] You have app version {appversion} installed. Please update to {latestversion} for the newest fixes.")
if sys.platform.startswith("win"):
toaster = ToastNotifier()
toaster.show_toast( #show windows toast
"EyeTrackVR has an update.",
"Click to go to the latest version.",
icon_path= "Images/logo.ico",
duration=5,
threaded=True,
callback_on_click=open_url
) )
try:
if is_nt:
# icon = cwd + "\Images\logo.ico"
icon = resource_path("Images/logo.ico")
toast = Notification(
app_id="EyeTrackApp",
title="New Update Available!",
msg=f"Please update to {latestversion}",
icon=r"{}".format(icon),
)
toast.add_actions(
label="Download Page",
launch="https://github.com/EyeTrackVR/EyeTrackVR/releases/latest",
)
toast.show()
except Exception as e:
print("[INFO] Toast notifications not supported")
except:
print("\033[91m[INFO] Could not check for updates. Please try again later.\033[0m")
timerResolution(True)

osc_queue: queue.Queue[OSCMessage] = queue.Queue(maxsize=10)
eyes = [
CameraWidget(EyeId.RIGHT, config, osc_queue),
@ -96,152 +271,221 @@ def main():
]

settings = [
SettingsWidget(EyeId.SETTINGS, config),
AlgoSettingsWidget(EyeId.ALGOSETTINGS, config),
VRCFTSettingsWidget(EyeId.VRCFTMODULESETTINGS, config, osc_queue),
]

osc_manager = OSCManager(
osc_message_in_queue=osc_queue,
config=config,
)
config.register_listener_callback(osc_manager.update)
config.register_listener_callback(eyes[0].on_config_update)
config.register_listener_callback(eyes[1].on_config_update)

osc_manager.register_listeners(
config.settings.gui_osc_recenter_address,
[
eyes[0].osc_recenter_eyes,
eyes[1].osc_recenter_eyes,
],
)
osc_manager.register_listeners(
config.settings.gui_osc_recalibrate_address,
[
eyes[0].osc_recalibrate_eyes,
eyes[1].osc_recalibrate_eyes,
],
)
osc_manager.start()
while True: while True:
tint = 33
fs = False
if config.settings.gui_disable_gui:
layoutg = [
[sg.Text("GUI Disabled!", background_color="#242224")],
[sg.Button('Enable GUI', button_color="#6f4ca1")]
]
# Create the window
windowg = sg.Window('ETVR', layoutg, background_color="#242224", size=(200, 80)) #icon=resource_path("Images/logo.ico") adds cpu usage.....
# Event loop
while True:
eventg, valuesg = windowg.read(timeout=tint)
if eventg == sg.WINDOW_CLOSED:
config.settings.gui_disable_gui = False
config.save()
break
elif eventg == 'Enable GUI':
config.settings.gui_disable_gui = False
config.save()
print('GUI Enabled')
break
windowg.close()
# First off, check for any events from the GUI # First off, check for any events from the GUI
event, values = window.read(timeout=1) window = create_window(config, settings, eyes)
# If we're in either mode and someone hits q, quit immediately while True:
if event == "Exit" or event == sg.WIN_CLOSED: event, values = window.read(timeout=tint) # this higher timeout saves some cpu usage
for eye in eyes:
eye.stop()
cancellation_event.set()
# shut down worker threads
osc_thread.join()
# TODO: find a way to have this function run on join maybe??
# threading.Event() wont work because pythonosc spawns its own thread.
# only way i can see to get around this is an ugly while loop that only checks if a threading event is triggered
# and then call the pythonosc shutdown function
osc_receiver.shutdown()
osc_receiver_thread.join()
print("Exiting EyeTrackApp")
return
if values[RIGHT_EYE_RADIO_NAME] and config.eye_display_id != EyeId.RIGHT: # If we're in either mode and someone hits q, quit immediately
eyes[0].start() if event in ("Exit", sg.WIN_CLOSED) and not config.settings.gui_disable_gui:
eyes[1].stop() for eye in eyes:
settings[0].stop() eye.stop()
window[RIGHT_EYE_NAME].update(visible=True) cancellation_event.set()
window[LEFT_EYE_NAME].update(visible=False) osc_manager.shutdown()
window[SETTINGS_NAME].update(visible=False) timerResolution(False)
config.eye_display_id = EyeId.RIGHT print("\033[94m[INFO] Exiting EyeTrackApp\033[0m")
config.settings.tracker_single_eye = 2 window.close()
config.save() os._exit(0) # I do not like this, but for now this fixes app hang on close
elif values[LEFT_EYE_RADIO_NAME] and config.eye_display_id != EyeId.LEFT: return
settings[0].stop()
eyes[0].stop()
eyes[1].start()
window[RIGHT_EYE_NAME].update(visible=False)
window[LEFT_EYE_NAME].update(visible=True)
window[SETTINGS_NAME].update(visible=False)
config.eye_display_id = EyeId.LEFT
config.settings.tracker_single_eye = 1
config.save()
elif values[BOTH_EYE_RADIO_NAME] and config.eye_display_id != EyeId.BOTH:
settings[0].stop()
eyes[0].stop()
eyes[1].start()
eyes[0].start()
window[LEFT_EYE_NAME].update(visible=True) try:
window[RIGHT_EYE_NAME].update(visible=True) # If window isn't in focus increase timeout and stop loop early
window[SETTINGS_NAME].update(visible=False) if window.TKroot.focus_get():
config.eye_display_id = EyeId.BOTH if fs:
config.settings.tracker_single_eye = 0 fs = False
config.save() tint = 33
window["-WINFOCUS-"].update(visible=False)
window["-WINFOCUS-"].hide_row()
window.refresh()
else:
if not fs:
fs = True
tint = 100
window["-WINFOCUS-"].update(visible=True)
window["-WINFOCUS-"].unhide_row()
continue
except KeyError:
pass
elif values[SETTINGS_RADIO_NAME] and config.eye_display_id != EyeId.SETTINGS: if values[key_manager.RIGHT_EYE_RADIO_NAME] and config.eye_display_id != EyeId.RIGHT:
eyes[0].stop() config.settings.gui_disable_gui = False
eyes[1].stop() eyes[0].start()
settings[0].start() eyes[1].stop()
window[RIGHT_EYE_NAME].update(visible=False) settings[0].stop()
window[LEFT_EYE_NAME].update(visible=False) settings[1].stop()
window[SETTINGS_NAME].update(visible=True) settings[2].stop()
config.eye_display_id = EyeId.SETTINGS window[key_manager.RIGHT_EYE_NAME].update(visible=True)
config.save() window[key_manager.LEFT_EYE_NAME].update(visible=False)
window[key_manager.SETTINGS_NAME].update(visible=False)
window[key_manager.VRCFT_MODULE_SETTINGS_NAME].update(visible=False)
window[key_manager.ALGO_SETTINGS_NAME].update(visible=False)
config.eye_display_id = EyeId.RIGHT
config.settings.tracker_single_eye = 2
config.save()
# Otherwise, render all of our cameras elif values[key_manager.LEFT_EYE_RADIO_NAME] and config.eye_display_id != EyeId.LEFT:
for eye in eyes: config.settings.gui_disable_gui = False
if eye.started(): settings[0].stop()
eye.render(window, event, values) settings[1].stop()
settings[0].render(window, event, values) settings[2].stop()
eyes[0].stop()
eyes[1].start()
window[key_manager.RIGHT_EYE_NAME].update(visible=False)
window[key_manager.LEFT_EYE_NAME].update(visible=True)
window[key_manager.SETTINGS_NAME].update(visible=False)
window[key_manager.VRCFT_MODULE_SETTINGS_NAME].update(visible=False)
window[key_manager.ALGO_SETTINGS_NAME].update(visible=False)
config.eye_display_id = EyeId.LEFT
config.settings.tracker_single_eye = 1
config.save()
elif values[key_manager.BOTH_EYE_RADIO_NAME] and config.eye_display_id != EyeId.BOTH:
config.settings.gui_disable_gui = False
settings[0].stop()
settings[1].stop()
settings[2].stop()
eyes[1].start()
eyes[0].start()
window[key_manager.LEFT_EYE_NAME].update(visible=True)
window[key_manager.RIGHT_EYE_NAME].update(visible=True)
window[key_manager.SETTINGS_NAME].update(visible=False)
window[key_manager.VRCFT_MODULE_SETTINGS_NAME].update(visible=False)
window[key_manager.ALGO_SETTINGS_NAME].update(visible=False)
config.eye_display_id = EyeId.BOTH
config.settings.tracker_single_eye = 0
config.save()
elif values[key_manager.SETTINGS_RADIO_NAME] and config.eye_display_id != EyeId.SETTINGS:
config.settings.gui_disable_gui = False
eyes[0].stop()
eyes[1].stop()
settings[1].stop()
settings[0].start()
settings[2].stop()
window[key_manager.RIGHT_EYE_NAME].update(visible=False)
window[key_manager.LEFT_EYE_NAME].update(visible=False)
window[key_manager.SETTINGS_NAME].update(visible=True)
window[key_manager.VRCFT_MODULE_SETTINGS_NAME].update(visible=False)
window[key_manager.ALGO_SETTINGS_NAME].update(visible=False)
config.eye_display_id = EyeId.SETTINGS
config.save()
elif values[key_manager.ALGO_SETTINGS_RADIO_NAME] and config.eye_display_id != EyeId.ALGOSETTINGS:
config.settings.gui_disable_gui = False
eyes[0].stop()
eyes[1].stop()
settings[0].stop()
settings[1].start()
settings[2].stop()
window[key_manager.RIGHT_EYE_NAME].update(visible=False)
window[key_manager.LEFT_EYE_NAME].update(visible=False)
window[key_manager.SETTINGS_NAME].update(visible=False)
window[key_manager.VRCFT_MODULE_SETTINGS_NAME].update(visible=False)
window[key_manager.ALGO_SETTINGS_NAME].update(visible=True)
config.eye_display_id = EyeId.ALGOSETTINGS
config.save()
elif values[key_manager.VRCFT_MODULE_SETTINGS_RADIO_NAME] and config.eye_display_id != EyeId.VRCFTMODULESETTINGS:
config.settings.gui_disable_gui = False
eyes[0].stop()
eyes[1].stop()
settings[0].stop()
settings[1].stop()
settings[2].start()
window[key_manager.RIGHT_EYE_NAME].update(visible=False)
window[key_manager.LEFT_EYE_NAME].update(visible=False)
window[key_manager.SETTINGS_NAME].update(visible=False)
window[key_manager.VRCFT_MODULE_SETTINGS_NAME].update(visible=True)
window[key_manager.ALGO_SETTINGS_NAME].update(visible=False)
config.eye_display_id = EyeId.VRCFTMODULESETTINGS
config.save()
else:
# Otherwise, render all
for eye in eyes:
if eye.started():
eye.render(window, event, values)
for setting in settings:
if setting.started():
setting.render(window, event, values)
if event == key_manager.GUIOFF_RADIO_NAME:
config.settings.gui_disable_gui = True
# eyes[0].stop()
# eyes[1].stop()
settings[0].stop()
settings[1].stop()
settings[2].stop()
window[key_manager.RIGHT_EYE_NAME].update(visible=False)
window[key_manager.LEFT_EYE_NAME].update(visible=False)
window[key_manager.SETTINGS_NAME].update(visible=False)
window[key_manager.VRCFT_MODULE_SETTINGS_NAME].update(visible=False)
window[key_manager.ALGO_SETTINGS_NAME].update(visible=False)
#config.eye_display_id = EyeId.GUIOFF
config.save()
window.close()
break
if __name__ == "__main__": if __name__ == "__main__":
main() main()

View File

@@ -1,44 +1,47 @@
# -*- mode: python ; coding: utf-8 -*-
import sys ; sys.setrecursionlimit(sys.getrecursionlimit() * 5)

block_cipher = None

resources = [("Audio/*", "Audio"), ("Images/*", "Images/"), ("pye3d/refraction_models/*", "pye3d/refraction_models/"), ("Models/*", "Models/"), ("Tools/*", "Tools/")]

a = Analysis(
    ['eyetrackapp.py'],
    pathex=[],
    binaries=[],
    datas=resources,
    hiddenimports=['cv2', 'numpy', 'PySimpleGui', 'pkg_resources.extern'],
    hookspath=[],
    hooksconfig={},
    runtime_hooks=[],
    excludes=[],
    win_no_prefer_redirects=False,
    win_private_assemblies=False,
    cipher=block_cipher,
    noarchive=False
)
pyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher)

exe = EXE(
    pyz,
    a.scripts,
    a.binaries,
    a.datas,
    [],
    name='eyetrackapp',
    debug=False,
    bootloader_ignore_signals=False,
    strip=False,
    upx=True,
    upx_exclude=[],
    runtime_tmpdir=None,
    console=True,
    disable_windowed_traceback=False,
    argv_emulation=False,
    target_arch=None,
    codesign_identity=None,
    entitlements_file=None,
    icon="Images/logo.ico",
)

View File

@@ -0,0 +1,852 @@
"""
------------------------------------------------------------------------------------------------------
,@@@@@@
@@@@@@@@@@@ @@@
@@@@@@@@@@@@ @@@@@@@@@@@
@@@@@@@@@@@@@ @@@@@@@@@@@@@@
@@@@@@@/ ,@@@@@@@@@@@@@
/@@@@@@@@@@@@@@@ @@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@ @@@@@
@@@@@@@@ @@@@@
,@@@ @@@@&
@@@@@@. @@@@
@@@ @@@@@@@@@/ @@@@@
,@@@. @@@@@@((@ @@@@(
//@@@ ,, @@@@ @@@@@
@@@( @@@@@@@
@@@ @ @@@@@@@@#
@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@(
Haar Surround Feature: Summer, PallasNeko (Optimization)
Algorithm App Implementations and tweaks By: Prohurtz
Copyright (c) 2025 EyeTrackVR <3
LICENSE: Summer Software Distribution License 1.0
------------------------------------------------------------------------------------------------------
"""
import timeit
from functools import lru_cache
import cv2
import numpy as np
from utils.img_utils import safe_crop
import psutil
import sys
import os
process = psutil.Process(os.getpid()) # set process priority to low
try: # medium chance this does absolutely nothing but eh
sys.getwindowsversion()
except AttributeError:
process.nice(0) # UNIX: 0 low 10 high
process.nice()
else:
process.nice(psutil.BELOW_NORMAL_PRIORITY_CLASS) # Windows
process.nice()
# from line_profiler_pycharm import profile
video_path = "ezgif.com-gif-maker.avi"
imshow_enable = False
calc_print_enable = False
save_video = False
skip_autoradius = False
skip_blink_detect = False
# cache param
lru_maxsize_vvs = 16
lru_maxsize_vs = 64
lru_maxsize_s = 128
# CV param
default_radius = 20
auto_radius_range = (default_radius - 18, default_radius + 15)  # (2, 35) with the default radius of 20
auto_radius_step = 1
blink_init_frames = 60 * 3  # 60 fps * 3 s: number of frames used for blink statistics
# step==(x,y)
default_step = (
5,
5,
)  # the bigger the steps, the lower the processing time! of course accuracy takes an impact too
class CvParameters:
# It may be a little slower because a dict named "self" is read for each function call.
def __init__(self, radius, step):
# self.prev_radius=radius
self._radius = radius
self.pad = 2 * radius
# self.prev_step=step
self._step = step
self._hsf = HaarSurroundFeature(radius)
def get_rpsh(self):
return self._radius, self.pad, self._step, self._hsf
# Essentially, the following would be preferable, but it would take twice as long to call.
# return self.radius, self.pad, self.step, self.hsf
@property
def radius(self):
return self._radius
@radius.setter
def radius(self, now_radius):
# self.prev_radius=self._radius
self._radius = now_radius
self.pad = 2 * now_radius
self.hsf = now_radius
@property
def step(self):
return self._step
@step.setter
def step(self, now_step):
# self.prev_step=self.step
self._step = now_step
@property
def hsf(self):
return self._hsf
@hsf.setter
def hsf(self, now_radius):
self._hsf = HaarSurroundFeature(now_radius)
class HaarSurroundFeature:
def __init__(self, r_inner, r_outer=None, val=None):
if r_outer is None:
r_outer = r_inner * 3
# print(r_outer)
r_inner2 = r_inner * r_inner
count_inner = r_inner2
count_outer = r_outer * r_outer - r_inner2
if val is None:
val_inner = 1.0 / r_inner2
val_outer = -val_inner * count_inner / count_outer
else:
val_inner = val[0]
val_outer = val[1]
self.val_in = float(val_inner) # np.array(val_inner, dtype=np.float64)
self.val_out = float(val_outer) # np.array(val_outer, dtype=np.float64)
self.r_in = r_inner
self.r_out = r_outer
def get_kernel(self):
# Defined here, but not yet used?
# Create a kernel filled with the value of self.val_out
kernel = np.ones(shape=(2 * self.r_out - 1, 2 * self.r_out - 1), dtype=np.float64) * self.val_out
# Set the values of the inner area of the kernel using array slicing
start = self.r_out - self.r_in
end = self.r_out + self.r_in - 1
kernel[start:end, start:end] = self.val_in
return kernel
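# --- Illustrative sketch (not part of the upstream file): what the surround kernel measures ---
# The inner square carries positive weights and the surrounding ring negative ones, so the
# response is roughly (mean inner brightness) - (mean surround brightness); a dark pupil on a
# lighter iris/sclera therefore gives a strongly negative response at its centre. Hypothetical
# direct use via cv2.filter2D (the app instead takes the much faster integral-image path below):
#     hsf = HaarSurroundFeature(default_radius)
#     resp = cv2.filter2D(gray.astype(np.float64), -1, hsf.get_kernel(), borderType=cv2.BORDER_CONSTANT)
#     y, x = np.unravel_index(np.argmin(resp), resp.shape)  # candidate pupil centre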
def to_gray(frame):
# Faster by quitting checking if the input image is already grayscale
# Perhaps it would be faster with less overhead to call cv2.cvtColor directly instead of using this function
return cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
@lru_cache(maxsize=lru_maxsize_vvs)
def get_frameint_empty_array(frame_shape, pad, x_step, y_step, r_in, r_out):
frame_int_dtype = np.intc
frame_pad = np.empty((frame_shape[0] + (pad * 2), frame_shape[1] + (pad * 2)), dtype=np.uint8)
row, col = frame_pad.shape
frame_int = np.empty((row + 1, col + 1), dtype=frame_int_dtype)
y_steps_arr = np.arange(pad, row - pad, y_step, dtype=np.int16)
x_steps_arr = np.arange(pad, col - pad, x_step, dtype=np.int16)
len_sx, len_sy = len(x_steps_arr), len(y_steps_arr)
len_syx = (len_sy, len_sx)
y_end = pad + (y_step * (len_sy - 1))
x_end = pad + (x_step * (len_sx - 1))
y_rin_m = slice(pad - r_in, y_end - r_in + 1, y_step)
y_rin_p = slice(pad + r_in, y_end + r_in + 1, y_step)
x_rin_m = slice(pad - r_in, x_end - r_in + 1, x_step)
x_rin_p = slice(pad + r_in, x_end + r_in + 1, x_step)
in_p00 = frame_int[y_rin_m, x_rin_m]
in_p11 = frame_int[y_rin_p, x_rin_p]
in_p01 = frame_int[y_rin_m, x_rin_p]
in_p10 = frame_int[y_rin_p, x_rin_m]
y_ro_m = np.maximum(y_steps_arr - r_out, 0) # [:,np.newaxis]
x_ro_m = np.maximum(x_steps_arr - r_out, 0) # [np.newaxis,:]
y_ro_p = np.minimum(row, y_steps_arr + r_out) # [:,np.newaxis]
x_ro_p = np.minimum(col, x_steps_arr + r_out) # [np.newaxis,:]
inner_sum = np.empty(len_syx, dtype=frame_int_dtype)
outer_sum = np.empty(len_syx, dtype=frame_int_dtype)
out_p_temp = np.empty((len_sy, col + 1), dtype=frame_int_dtype)
out_p00 = np.empty(len_syx, dtype=frame_int_dtype)
out_p11 = np.empty(len_syx, dtype=frame_int_dtype)
out_p01 = np.empty(len_syx, dtype=frame_int_dtype)
out_p10 = np.empty(len_syx, dtype=frame_int_dtype)
response_list = np.empty(len_syx, dtype=np.float64) # or np.int32
frame_conv = np.zeros(shape=(row - 2 * pad, col - 2 * pad), dtype=np.uint8) # or np.float64
frame_conv_stride = frame_conv[::y_step, ::x_step]
return (
frame_pad,
frame_int,
inner_sum,
in_p00,
in_p11,
in_p01,
in_p10,
y_ro_m,
x_ro_m,
y_ro_p,
x_ro_p,
outer_sum,
out_p_temp,
out_p00,
out_p11,
out_p01,
out_p10,
response_list,
frame_conv,
frame_conv_stride,
)
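# --- Illustrative note (not part of the upstream file): why lru_cache works here ---
# All arguments are hashable scalars, so every frame with the same (shape, pad, step, radius)
# combination gets back the *same* preallocated buffers; in_p00..in_p10 are slices of
# frame_int, so refilling frame_int each frame updates those views for free, and nothing is
# reallocated until the ROI or the radius changes.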
def conv_int(
frame_int,
kernel,
inner_sum,
in_p00,
in_p11,
in_p01,
in_p10,
y_ro_m,
x_ro_m,
y_ro_p,
x_ro_p,
outer_sum,
out_p_temp,
out_p00,
out_p11,
out_p01,
out_p10,
response_list,
frame_conv_stride,
):
# inner_sum[:, :] = in_p00 + in_p11 - in_p01 - in_p10
cv2.add(in_p00, in_p11, dst=inner_sum)
cv2.subtract(inner_sum, in_p01, dst=inner_sum)
cv2.subtract(inner_sum, in_p10, dst=inner_sum)
# p00 calc
frame_int.take(y_ro_m, axis=0, mode="clip", out=out_p_temp)
out_p_temp.take(x_ro_m, axis=1, mode="clip", out=out_p00)
# p01 calc
out_p_temp.take(x_ro_p, axis=1, mode="clip", out=out_p01)
# p11 calc
frame_int.take(y_ro_p, axis=0, mode="clip", out=out_p_temp)
out_p_temp.take(x_ro_p, axis=1, mode="clip", out=out_p11)
# p10 calc
out_p_temp.take(x_ro_m, axis=1, mode="clip", out=out_p10)
# outer_sum[:, :] = out_p00 + out_p11 - out_p01 - out_p10 - inner_sum
cv2.add(out_p00, out_p11, dst=outer_sum)
cv2.subtract(outer_sum, out_p01, dst=outer_sum)
cv2.subtract(outer_sum, out_p10, dst=outer_sum)
cv2.subtract(outer_sum, inner_sum, dst=outer_sum)
# cv2.transform(np.asarray([p00, p11, -p01, -p10, -inner_sum]).transpose((1, 2, 0)), np.ones((1, 5)),
# dst=outer_sum) # https://answers.opencv.org/question/3120/how-to-sum-a-3-channel-matrix-to-a-one-channel-matrix/
# np.multiply(kernel.val_in, inner_sum, dtype=np.float64, out=response_list)
# response_list += kernel.val_out * outer_sum
cv2.addWeighted(
inner_sum,
kernel.val_in,
outer_sum, # or p00 + p11 - p01 - p10 - inner_sum
kernel.val_out,
0.0,
dtype=cv2.CV_64F, # or cv2.CV_32S
dst=response_list,
)
min_response, _, min_loc, _ = cv2.minMaxLoc(response_list)
frame_conv_stride[:, :] = response_list
# or
# frame_conv_stride[:, :] = response_list.astype(np.uint8)
return min_response, min_loc
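# --- Illustrative sketch (not part of the upstream file): the summed-area-table identity ---
# conv_int() reads every box sum above with four lookups into the integral image. A minimal,
# self-contained check of that identity (all names below are hypothetical):
def _box_sum_demo():
    img = np.random.randint(0, 256, (48, 64), dtype=np.uint8)
    ii = cv2.integral(img, sdepth=cv2.CV_32S)  # shape (49, 65); ii[y, x] == img[:y, :x].sum()
    y0, y1, x0, x1 = 10, 30, 5, 25
    box = int(ii[y1, x1]) - int(ii[y0, x1]) - int(ii[y1, x0]) + int(ii[y0, x0])
    assert box == int(img[y0:y1, x0:x1].sum())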
@lru_cache(maxsize=lru_maxsize_s)
def get_hsf_center(padding, x_step, y_step, min_loc): # min_x,min_y):
return (
padding + (x_step * min_loc[0]) - padding,
padding + (y_step * min_loc[1]) - padding,
)
class AutoRadiusCalc(object):
def __init__(self):
self.response_list = []
self.radius_cand_list = []
self.adj_comp_flag = False
self.radius_middle_index = None
self.left_item = None
self.right_item = None
self.left_index = None
self.right_index = None
def get_radius(self):
prev_res_len = len(self.response_list)
# adjustment of radius
if prev_res_len == 1:
# len==1==response_list==[default_radius]
self.adj_comp_flag = False
return auto_radius_range[0]
elif prev_res_len == 2:
# len==2==response_list==[default_radius, auto_radius_range[0]]
self.adj_comp_flag = False
return auto_radius_range[1]
elif prev_res_len == 3:
# len==3==response_list==[default_radius,auto_radius_range[0],auto_radius_range[1]]
if self.response_list[1][1] < self.response_list[2][1]:
self.left_item = self.response_list[1]
self.right_item = self.response_list[0]
else:
self.left_item = self.response_list[0]
self.right_item = self.response_list[2]
self.radius_cand_list = [
i
for i in range(
self.left_item[0],
self.right_item[0] + auto_radius_step,
auto_radius_step,
)
]
self.left_index = 0
self.right_index = len(self.radius_cand_list) - 1
self.radius_middle_index = (self.left_index + self.right_index) // 2
self.adj_comp_flag = False
return self.radius_cand_list[self.radius_middle_index]
else:
if self.left_index <= self.right_index and self.left_index != self.radius_middle_index:
if (self.left_item[1] + self.response_list[-1][1]) < (self.right_item[1] + self.response_list[-1][1]):
self.right_item = self.response_list[-1]
self.right_index = self.radius_middle_index - 1
self.radius_middle_index = (self.left_index + self.right_index) // 2
self.adj_comp_flag = False
return self.radius_cand_list[self.radius_middle_index]
if (self.left_item[1] + self.response_list[-1][1]) > (self.right_item[1] + self.response_list[-1][1]):
self.left_item = self.response_list[-1]
self.left_index = self.radius_middle_index + 1
self.radius_middle_index = (self.left_index + self.right_index) // 2
self.adj_comp_flag = False
return self.radius_cand_list[self.radius_middle_index]
self.adj_comp_flag = True
return self.radius_cand_list[self.radius_middle_index]
def get_radius_base(self):
"""
Use it when the new version doesn't work well.
:return:
"""
prev_res_len = len(self.response_list)
# adjustment of radius
if prev_res_len == 1:
# len==1==response_list==[default_radius]
self.adj_comp_flag = False
return auto_radius_range[0]
elif prev_res_len == 2:
# len==2==response_list==[default_radius, auto_radius_range[0]]
self.adj_comp_flag = False
return auto_radius_range[1]
elif prev_res_len == 3:
# len==3==response_list==[default_radius,auto_radius_range[0],auto_radius_range[1]]
sort_res = sorted(self.response_list, key=lambda x: x[1])[0]
# Extract the radius with the lowest response value
if sort_res[0] == default_radius:
# If the default value is best, change now_mode to init after setting radius to the default value.
self.adj_comp_flag = True
return default_radius
elif sort_res[0] == auto_radius_range[0]:
self.radius_cand_list = [i for i in range(auto_radius_range[0], default_radius, auto_radius_step)][1:]
self.adj_comp_flag = False
return self.radius_cand_list.pop()
else:
self.radius_cand_list = [i for i in range(default_radius, auto_radius_range[1], auto_radius_step)][1:]
self.adj_comp_flag = False
return self.radius_cand_list.pop()
else:
# Try the contents of the radius_cand_list in order until the radius_cand_list runs out
# Better make it a binary search.
if len(self.radius_cand_list) == 0:
sort_res = sorted(self.response_list, key=lambda x: x[1])[0]
self.adj_comp_flag = True
return sort_res[0]
else:
self.adj_comp_flag = False
return self.radius_cand_list.pop()
def add_response(self, radius, response):
self.response_list.append((radius, response))
return None
class BlinkDetector(object):
def __init__(self):
self.response_list = []
self.response_max = None
self.enable_detect_flg = False
self.quartile_1 = None
def calc_thresh(self):
# Calculate response_max by computing interquartile range, IQR
# self.response_listo = np.array(self.response_listo)
# 25%,75%
# This value may need to be adjusted depending on the environment.
# quartile_1, quartile_3 = np.percentile(self.response_listo, [25, 75])
# iqr = quartile_3 - quartile_1
# self.response_maxo = quartile_3 + (iqr * 1.5)
# quartile_1, quartile_3 = np.percentile(self.response_list, [25, 75])
# or
quartile_1, quartile_3 = np.percentile(np.array(self.response_list), [25, 75])
self.quartile_1 = quartile_1
iqr = quartile_3 - quartile_1
# response_min = quartile_1 - (iqr * 1.5)
self.response_max = float(quartile_3 + (iqr * 1.5))
# or
# self.response_max = quartile_3 + (iqr * 1.5)
self.enable_detect_flg = True
return None
def detect(self, now_response):
return now_response > self.response_max
def add_response(self, response):
self.response_list.append(response)
return None
def response_len(self):
return len(self.response_list)
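# --- Illustrative note (not part of the upstream file): the IQR threshold in numbers ---
# calc_thresh() flags a blink when the crop's mean intensity exceeds Q3 + 1.5 * IQR of the
# samples gathered during blink_init_frames. Hypothetical numbers:
#     response_list = [100, 102, 104, 106, 108]  ->  Q1 = 102, Q3 = 106, IQR = 4
#     response_max  = 106 + 1.5 * 4 = 112        ->  detect(115) is True, detect(110) is False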
class CenterCorrection(object):
def __init__(self):
# Tunable parameters
kernel_size = 7 # 3 or 5 or 7
self.hist_thr = float(4) # 4%
self.center_q1_radius = 20
self.setup_comp = False
self.quartile_1 = None
self.radius = None
self.frame_shape = None
self.frame_mask = None
self.frame_bin = None
self.frame_final = None
self.morph_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (kernel_size, kernel_size))
self.morph_kernel2 = np.ones((3, 3))
self.hist_index = np.arange(256)
self.hist = np.empty((256, 1))
self.hist_norm = np.empty((256, 1))
def init_array(self, gray_shape, quartile_1, radius):
self.frame_shape = gray_shape
self.frame_mask = np.empty(gray_shape, dtype=np.uint8)
self.frame_bin = np.empty(gray_shape, dtype=np.uint8)
self.frame_final = np.empty(gray_shape, dtype=np.uint8)
self.quartile_1 = quartile_1
self.radius = radius
self.setup_comp = True
# def reset_array(self):
# self.frame_mask.fill(0)
def correction(self, gray_frame, orig_x, orig_y):
center_x, center_y = orig_x, orig_y
self.frame_mask.fill(0)
# cv2.circle(self.frame_mask, center=(center_x, center_y), radius=int(self.radius * 2), color=255, thickness=-1)
# bottleneck
cv2.calcHist([gray_frame], [0], None, [256], [0, 256], hist=self.hist)
cv2.normalize(self.hist, self.hist_norm, alpha=100.0, norm_type=cv2.NORM_L1)
hist_per = self.hist_norm.cumsum()
hist_index_list = self.hist_index[hist_per >= self.hist_thr]
frame_thr = (
hist_index_list[0]
if len(hist_index_list)
else np.percentile(cv2.bitwise_or(255 - self.frame_mask, gray_frame), 4)
)
# bottleneck
self.frame_bin = cv2.threshold(gray_frame, frame_thr, 1, cv2.THRESH_BINARY_INV)[1]
cropped_x, cropped_y, cropped_w, cropped_h = cv2.boundingRect(self.frame_bin)
self.frame_final = cv2.bitwise_and(self.frame_bin, self.frame_mask)
# bottleneck
self.frame_final = cv2.morphologyEx(self.frame_final, cv2.MORPH_CLOSE, self.morph_kernel)
self.frame_final = cv2.morphologyEx(self.frame_final, cv2.MORPH_OPEN, self.morph_kernel)
if (cropped_h, cropped_w) == self.frame_shape:
# Not detected.
base_x, base_y = center_x, center_y
else:
base_x = cropped_x + cropped_w // 2
base_y = cropped_y + cropped_h // 2
if self.frame_final[base_y, base_x] != 1:
if self.frame_final[center_y, center_x] != 1:
self.frame_final = cv2.morphologyEx(
self.frame_final,
cv2.MORPH_DILATE,
self.morph_kernel2,
iterations=3,
)
else:
base_x, base_y = center_x, center_y
contours, _ = cv2.findContours(self.frame_final, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
contours_box = [cv2.boundingRect(cnt) for cnt in contours]
contours_dist = np.array(
[
abs(base_x - (cnt_x + cnt_w / 2)) + abs(base_y - (cnt_y + cnt_h / 2))
for cnt_x, cnt_y, cnt_w, cnt_h in contours_box
]
)
if len(contours_box):
cropped_x2, cropped_y2, cropped_w2, cropped_h2 = contours_box[contours_dist.argmin()]
x = cropped_x2 + cropped_w2 // 2
y = cropped_y2 + cropped_h2 // 2
else:
x = center_x
y = center_y
# if imshow_enable:
# cv2.circle(frame, (orig_x, orig_y), 10, (255, 0, 0), -1)
# cv2.circle(frame, (x, y), 7, (0, 0, 255), -1)
#
# out_x = center_x if abs(x - center_x) > radius else x
# out_y = center_y if abs(y - center_y) > radius else y
out_x, out_y = orig_x, orig_y
if (
gray_frame[
int(max(y - 5, 0)) : int(min(y + 5, self.frame_shape[0])),
int(max(x - 5, 0)) : int(min(x + 5, self.frame_shape[1])),
].min()
< self.quartile_1
):
out_x = x
out_y = y
# if imshow_enable:
# cv2.circle(frame, (out_x, out_y), 5, (0, 255, 0), -1)
#
# cv2.imshow("frame_bin", self.frame_bin * 255)
# cv2.imshow("frame_final", self.frame_final * 255)
return out_x, out_y
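# --- Illustrative sketch (not part of the upstream file): hist_thr as a percentile cut ---
# The cumulative L1-normalised histogram above is in percent, so the first grey level where it
# reaches hist_thr (4%) approximates the 4th-percentile intensity; thresholding below it keeps
# roughly the darkest 4% of pixels, which in an eye image is mostly pupil. A hypothetical
# stand-alone equivalent of that logic:
def _percentile_threshold_demo(gray_frame):
    thr = np.percentile(gray_frame, 4)  # grey level under which ~4% of pixels fall
    return (gray_frame <= thr).astype(np.uint8)  # 1 = dark/pupil candidate, like frame_bin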
class HSF_cls(object):
def __init__(self):
# I'd like to take into account things like print, end_time - start_time processing time, etc., but it's too much trouble.
# For measuring total processing time
self.main_start_time = timeit.default_timer()
self.rng = np.random.default_rng()
self.cvparam = CvParameters(default_radius, default_step)
self.cv_modeo = ["first_frame", "radius_adjust", "blink_adjust", "normal"]
self.now_modeo = self.cv_modeo[0]
self.auto_radius_calc = AutoRadiusCalc()
self.blink_detector = BlinkDetector()
self.center_q1 = BlinkDetector()
self.center_correct = CenterCorrection()
self.cap = None
self.timedict = {
"to_gray": [],
"int_img": [],
"conv_int": [],
"crop": [],
"total_cv": [],
}
def open_video(self, video_path):
# Temporary implementation to run
cap = cv2.VideoCapture(video_path)
if not cap.isOpened():
raise IOError("Error opening video stream or file")
self.cap = cap
return True
def read_frame(self):
# Temporary implementation to run
if not self.cap.isOpened():
return False
ret, frame = self.cap.read()
if ret:
# I have set it to grayscale (1ch) just in case, but if the frame is 1ch, this line can be commented out.
self.current_image_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
return True
return False
cct = 300
ransac_lower_x = 100
ransac_lower_y = 100
cx = 0
cy = 0
def single_run(self):
# Temporary implementation to run
## default_radius = 14
# cropbox=[] # debug code
frame = self.current_image_gray
if self.now_modeo == self.cv_modeo[1]:
# adjustment of radius
# debug print
# if calc_print_enable:
# temp_radius = self.auto_radius_calc.get_radius()
# print('Now radius:', temp_radius)
# self.cvparam.radius = temp_radius
self.cvparam.radius = self.auto_radius_calc.get_radius()
if self.auto_radius_calc.adj_comp_flag:
self.now_modeo = self.cv_modeo[2] if not skip_blink_detect else self.cv_modeo[3]
radius, pad, step, hsf = self.cvparam.get_rpsh()
gray_frame = frame
# Calculate the integral image of the frame
(
frame_pad,
frame_int,
inner_sum,
in_p00,
in_p11,
in_p01,
in_p10,
y_ro_m,
x_ro_m,
y_ro_p,
x_ro_p,
outer_sum,
out_p_temp,
out_p00,
out_p11,
out_p01,
out_p10,
response_list,
frame_conv,
frame_conv_stride,
) = get_frameint_empty_array(gray_frame.shape, pad, step[0], step[1], hsf.r_in, hsf.r_out)
# BORDER_CONSTANT is faster than BORDER_REPLICATE. There seems to be almost no negative impact when BORDER_CONSTANT is used.
cv2.copyMakeBorder(gray_frame, pad, pad, pad, pad, cv2.BORDER_CONSTANT, dst=frame_pad)
cv2.integral(frame_pad, sum=frame_int, sdepth=cv2.CV_32S)
# Convolve the feature with the integral image
conv_int_start_time = timeit.default_timer()
response, hsf_min_loc = conv_int(
frame_int,
hsf,
inner_sum,
in_p00,
in_p11,
in_p01,
in_p10,
y_ro_m,
x_ro_m,
y_ro_p,
x_ro_p,
outer_sum,
out_p_temp,
out_p00,
out_p11,
out_p01,
out_p10,
response_list,
frame_conv_stride,
)
center_xy = get_hsf_center(pad, step[0], step[1], hsf_min_loc)
# Pseudo-visualization of HSF
# cv2.normalize(cv2.filter2D(cv2.filter2D(frame_pad, cv2.CV_64F, hsf.get_kernel()[hsf.get_kernel().shape[0]//2,:].reshape(1,-1), borderType=cv2.BORDER_CONSTANT), cv2.CV_64F, hsf.get_kernel()[:,hsf.get_kernel().shape[1]//2].reshape(-1,1), borderType=cv2.BORDER_CONSTANT),None,0,255,cv2.NORM_MINMAX,dtype=cv2.CV_8U))
# self.timedict["conv_int"].append(timeit.default_timer() - conv_int_start_time)
crop_start_time = timeit.default_timer()
# Define the center point and radius
center_x, center_y = center_xy
upper_x = center_x + radius
lower_x = center_x - radius
upper_y = center_y + radius
lower_y = center_y - radius
# Crop the image using the calculated bounds
cropped_image = safe_crop(gray_frame, lower_x, lower_y, upper_x, upper_y)
# cropbox = [clamp(val, 0, gray_frame.shape[i]) for i, val in
# zip([1, 0, 1, 0], [lower_x, lower_y, upper_x, upper_y])] # debug code
if self.now_modeo == self.cv_modeo[0] or self.now_modeo == self.cv_modeo[1]:
# If mode is first_frame or radius_adjust, record current radius and response
self.auto_radius_calc.add_response(radius, response)
elif self.now_modeo == self.cv_modeo[2]:
# Statistics for blink detection
if self.blink_detector.response_len() < blink_init_frames:
self.blink_detector.add_response(cv2.mean(cropped_image)[0])
upper_x = center_x + max(20, radius) # self.center_correct.center_q1_radius
lower_x = center_x - max(20, radius) # self.center_correct.center_q1_radius
upper_y = center_y + max(20, radius) # self.center_correct.center_q1_radius
lower_y = center_y - max(20, radius) # self.center_correct.center_q1_radius
self.center_q1.add_response(
cv2.mean(
safe_crop(
gray_frame,
lower_x,
lower_y,
upper_x,
upper_y,
keepsize=False,
)
)[0]
)
else:
self.blink_detector.calc_thresh()
self.center_q1.calc_thresh()
self.now_modeo = self.cv_modeo[3]
else:
if 0 in cropped_image.shape:
# If shape contains 0, it is not detected well.
print("Something's wrong.")
else:
orig_x, orig_y = center_x, center_y
if self.blink_detector.enable_detect_flg:
# If the average value of cropped_image is greater than response_max
# (i.e., if the cropped image is whitish)
if self.blink_detector.detect(cv2.mean(cropped_image)[0]):
# blink
pass
else:
# pass
if not self.center_correct.setup_comp:
self.center_correct.init_array(gray_frame.shape, self.center_q1.quartile_1, radius)
elif self.center_correct.frame_shape != gray_frame.shape:
"""The resolution should have changed and the statistics should have changed, so essentially the statistics
need to be reworked, but implementation will be postponed as viability is the highest priority."""
self.center_correct.init_array(gray_frame.shape, self.center_q1.quartile_1, radius)
center_x, center_y = self.center_correct.correction(gray_frame, center_x, center_y)
# Define the center point and radius
center_xy = (center_x, center_y)
upper_x = center_x + radius
lower_x = center_x - radius
upper_y = center_y + radius
lower_y = center_y - radius
# Crop the image using the calculated bounds
cropped_image = safe_crop(gray_frame, lower_x, lower_y, upper_x, upper_y)
# cropbox = [clamp(val, 0, gray_frame.shape[i]) for i, val in
# zip([1, 0, 1, 0], [lower_x, lower_y, upper_x, upper_y])] # debug code
# if imshow_enable or save_video:
cv2.circle(frame, (orig_x, orig_y), 6, (0, 0, 255), -1)
cv2.circle(frame, (center_x, center_y), 3, (255, 0, 0), -1)
# If you want to update response_max, it may be more cost-effective to rewrite response_list in the following way:
# https://stackoverflow.com/questions/42771110/fastest-way-to-left-cycle-a-numpy-array-like-pop-push-for-a-queue
cv_end_time = timeit.default_timer()
# self.timedict["crop"].append(cv_end_time - crop_start_time)
# self.timedict["total_cv"].append(cv_end_time - cv_start_time)
# if calc_print_enable:
# the lower the response, the better the likelihood of there being a pupil. you can adjust the radius and steps accordingly
# print('Kernel response:', response)
# print('Pixel position:', center_xy)
if imshow_enable:
if self.now_modeo != self.cv_modeo[0] and self.now_modeo != self.cv_modeo[1]:
if 0 in cropped_image.shape:
# If shape contains 0, it is not detected well.
pass
else:
cv2.imshow("crop", cropped_image)
cv2.imshow("frame", frame)
if cv2.waitKey(1) & 0xFF == ord("q"):
pass
if self.now_modeo == self.cv_modeo[0]:
# Moving from first_frame to the next mode
if skip_autoradius and skip_blink_detect:
self.now_modeo = self.cv_modeo[3]
elif skip_autoradius:
self.now_modeo = self.cv_modeo[2]
else:
self.now_modeo = self.cv_modeo[1]
# debug code
# return center_x,center_y,cropbox,frame
return center_x, center_y, frame, radius
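# --- Illustrative note (not part of the upstream file): the mode state machine ---
# single_run() walks cv_modeo in order: "first_frame" -> "radius_adjust" (AutoRadiusCalc
# narrows the kernel radius) -> "blink_adjust" (BlinkDetector gathers blink_init_frames of
# intensity statistics) -> "normal" (tracking, with optional CenterCorrection); the
# skip_autoradius / skip_blink_detect flags jump over the middle stages.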
class External_Run_HSF(object):
def __init__(self, skip_autoradius_flg=False, radius=20):
# temporary code
global skip_autoradius, default_radius
skip_autoradius = skip_autoradius_flg
if skip_autoradius:
default_radius = radius
self.algo = HSF_cls()
def run(self, current_image_gray):
self.algo.current_image_gray = current_image_gray
# debug code
# center_x, center_y,cropbox, frame = self.algo.single_run()
# return center_x, center_y,cropbox, frame
center_x, center_y, frame, radius = self.algo.single_run()
return center_x, center_y, frame, radius
if __name__ == "__main__":
hsf = HSF_cls()
hsf.open_video(video_path)
while hsf.read_frame():
_ = hsf.single_run()

View File

@@ -0,0 +1,344 @@
"""
------------------------------------------------------------------------------------------------------
,@@@@@@
@@@@@@@@@@@ @@@
@@@@@@@@@@@@ @@@@@@@@@@@
@@@@@@@@@@@@@ @@@@@@@@@@@@@@
@@@@@@@/ ,@@@@@@@@@@@@@
/@@@@@@@@@@@@@@@ @@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@ @@@@@
@@@@@@@@ @@@@@
,@@@ @@@@&
@@@@@@. @@@@
@@@ @@@@@@@@@/ @@@@@
,@@@. @@@@@@((@ @@@@(
//@@@ ,, @@@@ @@@@@
@@@( @@@@@@@
@@@ @ @@@@@@@@#
@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@(
Intensity Based Openess By: Prohurtz, PallasNeko (Optimization)
Algorithm App Implementations By: Prohurtz
Copyright (c) 2025 EyeTrackVR <3
LICENSE: Babble Software Distribution License 1.0
------------------------------------------------------------------------------------------------------
"""
import numpy as np
import time
import os
import cv2
from eye import EyeId
from one_euro_filter import OneEuroFilter
import psutil
import sys
process = psutil.Process(os.getpid()) # set process priority to low
try: # medium chance this does absolutely nothing but eh
sys.getwindowsversion()
except AttributeError:
process.nice(0) # UNIX: 0 low 10 high
process.nice()
else:
process.nice(psutil.BELOW_NORMAL_PRIORITY_CLASS) # Windows
process.nice()
# higher intensity means more closed/ more white/less pupil
# Hm I need an acronym for this, any ideas?
# IBO Intensity Based Openess
# HOW THIS WORKS:
# We get the intensity of the pupil area from the HSF crop. When the eyelid starts to close, the pupil becomes obstructed by skin, which is generally lighter than the pupil.
# This causes the intensity to increase. We save the darkest intensity seen at each pupil position to account for pupil movement:
# e.g. when you look up, less of the pupil is visible, which changes the intensity even though the eyelid has not moved in a meaningful way.
# We compare the darkest intensity of that position to the lightest (global) intensity to derive the openness state as a float.
# (A worked numeric sketch of this mapping follows the note below.)
# Note.
# OpenCV on Windows will generate an error if the file path contains non-ASCII characters when using cv2.imread(), cv2.imwrite(), etc.
# https://stackoverflow.com/questions/43185605/how-do-i-read-an-image-from-a-path-with-unicode-characters
# https://github.com/opencv/opencv/issues/18305
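# --- Illustrative sketch (not part of the upstream file): the openness mapping in numbers ---
# With maxp = the darkest intensity recorded at this pupil position and minp = the lightest
# (global) intensity, intense() maps the current crop intensity linearly and flips it:
#     eyeopen = 1 - (intensity - maxp) / (minp - maxp)
# Hypothetical numbers: maxp = 40_000, minp = 90_000, intensity = 65_000
#     -> (65_000 - 40_000) / (90_000 - 40_000) = 0.5  ->  eyeopen = 0.5 (half open)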
def csv2data(frameshape, filepath):
# For data checking
frameshape = (frameshape[0], frameshape[1] + 1)
out = np.zeros(frameshape, dtype=np.uint32)
xy_list = []
val_list = []
with open(filepath, mode="r", encoding="utf-8") as in_f:
# Skip header.
_ = in_f.readline()
for s in in_f:
xyval = [int(val) for val in s.strip().split(",")]
xy_list.append((xyval[0], xyval[1]))
val_list.append(xyval[2])
xy_list = np.array(xy_list)
val_list = np.array(val_list)
out[xy_list[:, 1], xy_list[:, 0]] = val_list[:]
return out
def data2csv(data_u32, filepath):
# For data checking
nonzero_index = np.nonzero(data_u32) # (row,col)
data_list = data_u32[nonzero_index].tolist()
datalines = ["{},{},{}\n".format(x, y, val) for y, x, val in zip(*nonzero_index, data_list)]
with open(filepath, "w", encoding="utf-8") as out_f:
out_f.write("x,y,intensity\n")
out_f.writelines(datalines)
return
def u32_1ch_to_u16_3ch(img):
out = np.zeros((*img.shape[:2], 3), dtype=np.uint16)
# https://github.com/numpy/numpy/issues/2524
# https://stackoverflow.com/questions/52782511/why-is-numpy-slower-than-python-for-left-bit-shifts
out[:, :, 0] = img & np.uint32(65535)
out[:, :, 1] = (img >> np.uint32(16)) & np.uint32(65535)
return out
def u16_3ch_to_u32_1ch(img):
# The image format with the most bits that can be displayed on Windows without additional software and that opencv can handle is PNG's uint16
out = img[:, :, 0].astype(np.float64) # float64 = max 2^53
cv2.add(out, img[:, :, 1].astype(np.float64) * np.float64(65536), dst=out) # opencv did not have uint32 type
return out.astype(np.uint32) # cast
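# --- Illustrative sketch (not part of the upstream file): round-tripping the packing ---
# PNG written by cv2.imwrite tops out at three uint16 channels, so the uint32 intensity map is
# split into low/high 16-bit halves on save and recombined on load. A hypothetical check:
def _pack_roundtrip_demo():
    data = np.random.randint(0, 2**32, (8, 8), dtype=np.uint32)
    assert np.array_equal(u16_3ch_to_u32_1ch(u32_1ch_to_u16_3ch(data)), data)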
def newdata(frameshape):
print("\033[94m[INFO] Initialise data for blinking.\033[0m")
return np.zeros(frameshape, dtype=np.uint32)
class IntensityBasedOpeness:
def __init__(self, eye_id):
# todo: It is necessary to consider whether the filename can be changed in the configuration file, etc.
if eye_id in [EyeId.LEFT]:
self.imgfile = "IBO_LEFT.png"
else:
pass
if eye_id in [EyeId.RIGHT]:
self.imgfile = "IBO_RIGHT.png"
else:
pass
# self.imgfile = "IBO_LEFT.png" if eyeside is EyeLR.LEFT else "IBO_RIGHT.png"
# self.data[0, -1] = maxval, [1, -1] = rotation, [2, -1] = x, [3, -1] = y
self.data = None
self.lct = None
self.maxval = 0
# self.img_roi = self.now_roi == {"rotation": 0, "x": 0, "y": 0}
self.img_roi = np.zeros(3, dtype=np.int32)
self.now_roi = np.zeros(3, dtype=np.int32)
self.prev_val = 0.5
self.avg_intensity = 0.0
self.old = []
self.color = []
self.x = []
self.fc = 0
self.filterlist = []
self.averageList = []
self.openlist = []
self.eye_id = eye_id
self.maxinten = 0
self.tri_filter = []
min_cutoff = 0.0004
beta = 0.9
noisy_point = np.array([1, 1])
self.one_euro_filter = OneEuroFilter(noisy_point, min_cutoff=min_cutoff, beta=beta)
def check(self, frameshape):
# 0 in data is used as the initial value.
# When assigning a value, +1 is added to the value to be assigned.
self.load(frameshape)
# self.maxval = self.data[0, -1]
if self.lct is None:
self.lct = time.time()
def load(self, frameshape):
req_newdata = False
# Not very clever, but increase the width by 1px to save the maximum value.
frameshape = (frameshape[0], frameshape[1] + 1)
if self.data is None:
print(f"\033[92m[INFO] Loaded data for blinking: {self.imgfile}\033[0m")
if os.path.isfile(self.imgfile):
try:
img = cv2.imread(self.imgfile, flags=cv2.IMREAD_UNCHANGED)
# check code: cv2.absdiff(img,u32_1ch_to_u16_3ch(u16_3ch_to_u32_1ch(img)))
if img.shape[:2] != frameshape:
print("[WARN] Size does not match the input frame.")
req_newdata = True
else:
self.data = u16_3ch_to_u32_1ch(img)
self.img_roi[:] = self.data[1:4, -1]
if not np.array_equal(self.img_roi, self.now_roi):
# If the ROI recorded in the image file differs from the current ROI
req_newdata = True
else:
self.maxval = self.data[0, -1]
except:
print("[ERROR] File read error: {}".format(self.imgfile))
req_newdata = True
else:
print("\033[94m[INFO] File does not exist.\033[0m")
req_newdata = True
else:
if self.data.shape != frameshape or not np.array_equal(self.img_roi, self.now_roi):
# If the ROI recorded in the image file differs from the current ROI
# todo: Using the previous and current frame sizes and centre positions from the original, etc., the data can be ported to some extent, but there may be many areas where code changes are required.
print("[INFO] \033[94mFrame size changed.\033[0m")
req_newdata = True
if req_newdata:
self.data = newdata(frameshape)
self.maxval = 0
self.img_roi = self.now_roi.copy()
# data2csv(self.data, "a.csv")
# csv2data(frameshape,"a.csv")
def save(self):
self.data[0, -1] = self.maxval
self.data[1:4, -1] = self.now_roi
cv2.imwrite(self.imgfile, u32_1ch_to_u16_3ch(self.data))
# print("SAVED: {}".format(self.imgfile))
def change_roi(self, roiinfo: dict):
self.now_roi[:] = [v for v in roiinfo.values()]
def clear_filter(self):
self.data = None
self.filterlist.clear()
self.averageList.clear()
if os.path.exists(self.imgfile):
os.remove(self.imgfile)
def intense(self, x, y, frame, filterSamples, outputSamples):
# x,y = 0~(frame.shape[1 or 0]-1), frame = 1-channel frame cropped by ROI
self.check(frame.shape)
int_x, int_y = int(x), int(y)
if int_x < 0 or int_y < 0:
return self.prev_val
upper_x = min(int_x + 25, frame.shape[1] - 1) # TODO make this a setting
lower_x = max(int_x - 25, 0)
upper_y = min(int_y + 25, frame.shape[0] - 1)
lower_y = max(int_y - 25, 0)
# frame_crop = frame[lower_y:upper_y, lower_x:upper_x]
# frame = safe_crop(frame, lower_x, lower_y, upper_x, upper_y, False)
# ret_, th = cv2.threshold(frame_crop, 80, 1.0, cv2.THRESH_BINARY_INV, dst=frame_crop)
frame_crop = frame
# ret, f = cv2.threshold(frame, 80, 255, cv2.THRESH_BINARY)
# ret, frame_crop = cv2.threshold(frame_crop, 80, 255, cv2.THRESH_BINARY)
# The same can be done with cv2.integral, but since there is only one area of the rectangle for which we want to know the total value, there is no advantage in terms of computational complexity.
intensity = frame_crop.sum() + 1
if len(self.filterlist) < filterSamples:
self.filterlist.append(intensity)
else:
self.filterlist.pop(0)
self.filterlist.append(intensity)
try:
if intensity >= np.percentile(self.filterlist, 99): # filter abnormally high values
intensity = self.maxval
except:
pass
# numpy:np.sum(),ndarray.sum()
# opencv:cv2.sumElems()
# I don't know which is faster.
changed = False
newval_flg = False
oob = False
if int_x >= frame.shape[1]:
int_x = frame.shape[1] - 1
oob = True
# print('CAUGHT X OUT OF BOUNDS')
if int_x < 0:
int_x = 0  # clamp to the left edge
oob = True
# print('CAUGHT X UNDER BOUNDS')
if int_y >= frame.shape[0]:
int_y = frame.shape[0] - 1
oob = True
# print('CAUGHT Y OUT OF BOUNDS')
if int_y < 0:
int_y = 1
oob = True
# print('CAUGHT Y UNDER BOUNDS')
if oob != True and self.data.any():
data_val = self.data[int_y, int_x]
else:
data_val = 0
# max pupil per cord
if data_val == 0:
# The value of the specified coordinates has not yet been recorded.
self.data[int_y, int_x] = intensity
changed = True
newval_flg = True
else:
if intensity < data_val: # if current intensity value is less (more pupil), save that
self.data[int_y, int_x] = intensity # set value
changed = True
else:
intensitya = max(
data_val + 5000, 1
)  # if the current intensity is not lower, nudge the stored value upward; this is an aggressive adjustment (test)
self.data[int_y, int_x] = intensitya # set value
changed = True
# min pupil global
if self.maxval == 0: # that value is not yet saved
self.maxval = intensity # set value at 0 index
else:
if intensity > self.maxval: # if current intensity value is more (less pupil), save that NOTE: we have the
self.maxval = intensity - 5 # set value at 0 index
else:
intensityd = max(
(self.maxval - 5), 1
) # continuously adjust closed intensity, will be set when user blink, used to allow eyes to close when lighting changes
self.maxval = intensityd # set value at 0 index
# print(intensityd, intensity)
if newval_flg:
# Do the same thing as in the original version.
eyeopen = self.prev_val # 0.9
else:
maxp = float(self.data[int_y, int_x])
minp = float(self.maxval)
eyeopen = (intensity - maxp) / (
minp - maxp
) # for whatever reason when input and maxp are too close it outputs high
eyeopen = 1 - eyeopen
if outputSamples > 0:
if len(self.averageList) < outputSamples:
self.averageList.append(eyeopen)
else:
self.averageList.pop(0)
self.averageList.append(eyeopen)
eyeopen = np.average(self.averageList)
eyeopen = np.clip(eyeopen, 0.0, 1.0)
if changed and ((time.time() - self.lct) > 11):  # save at most every 11 seconds if something changed, to limit disk usage
self.save()
self.lct = time.time()
self.prev_val = eyeopen
return eyeopen

193
EyeTrackApp/leap.py Normal file
View File

@@ -0,0 +1,193 @@
"""
------------------------------------------------------------------------------------------------------
,@@@@@@
@@@@@@@@@@@ @@@
@@@@@@@@@@@@ @@@@@@@@@@@
@@@@@@@@@@@@@ @@@@@@@@@@@@@@
@@@@@@@/ ,@@@@@@@@@@@@@
/@@@@@@@@@@@@@@@ @@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@ @@@@@
@@@@@@@@ @@@@@
,@@@ @@@@&
@@@@@@. @@@@
@@@ @@@@@@@@@/ @@@@@
,@@@. @@@@@@((@ @@@@(
//@@@ ,, @@@@ @@@@@
@@@( @@@@@@@
@@@ @ @@@@@@@@#
@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@(
Algorithm App Implementations By: Prohurtz
Copyright (c) 2025 EyeTrackVR <3
LICENSE: Babble Software Distribution License 1.0
------------------------------------------------------------------------------------------------------
"""
import os
import onnxruntime
import numpy as np
import cv2
import time
import math
from queue import Queue
import threading
from config import EyeTrackCameraConfig, EyeTrackConfig
from one_euro_filter import OneEuroFilter
import psutil
from utils.misc_utils import resource_path
from pathlib import Path
os.environ["OMP_NUM_THREADS"] = "1"
frames = 0
models = Path("Models")
def run_model(input_queue, output_queue, session):
while True:
frame = input_queue.get()
if frame is None:
break
img_np = np.array(frame, dtype=np.float32) / 255.0
gray_img = 0.299 * img_np[:, :, 0] + 0.587 * img_np[:, :, 1] + 0.114 * img_np[:, :, 2]
gray_img = np.expand_dims(np.expand_dims(gray_img, axis=0), axis=0)
ort_inputs = {session.get_inputs()[0].name: gray_img}
pre_landmark = session.run(None, ort_inputs)
pre_landmark = np.reshape(pre_landmark, (-1, 2))
output_queue.put((frame, pre_landmark))
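# --- Illustrative note (not part of the upstream file): the tensor layout fed to the model ---
# Each 112x112 RGB frame is reduced to luma with the BT.601 weights (0.299, 0.587, 0.114) and
# reshaped to NCHW, i.e. a (1, 1, 112, 112) float32 tensor in [0, 1]; the network's flat output
# becomes (x, y) landmark pairs in normalised [0, 1] image coordinates via np.reshape(..., (-1, 2)).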
def run_onnx_model(queues, session, frame):
for queue in queues:
if not queue.full():
queue.put(frame)
break
class LEAP_C:
def __init__(self, eye_config: EyeTrackCameraConfig, config: EyeTrackConfig):
self.last_lid = None
self.current_image_gray = None
self.current_image_gray_clean = None
onnxruntime.disable_telemetry_events()
self.num_threads = 1
self.queue_max_size = 1
self.model_path = resource_path(models / "pfld-sim.onnx")
self.print_fps = False
self.frames = 0
self.queues = [Queue(maxsize=self.queue_max_size) for _ in range(self.num_threads)]
self.threads = []
self.model_output = np.zeros((12, 2))
self.output_queue = Queue(maxsize=self.queue_max_size)
self.start_time = time.time()
opts = onnxruntime.SessionOptions()
opts.inter_op_num_threads = 1
opts.intra_op_num_threads = 1
opts.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
opts.enable_mem_pattern = False
self.one_euro_filter_float = OneEuroFilter(np.random.rand(1, 2), min_cutoff=0.0004, beta=0.9)
self.dmax = 0
self.dmin = 0
self.openlist = []
self.maxlist = []
self.previous_time = None
self.old_matrix = None
self.total_velocity_new = 0
self.total_velocity_avg = 0
self.total_velocity_old = 0
self.old_per = 0.0
self.delta_per_neg = 0.0
self.ort_session1 = onnxruntime.InferenceSession(self.model_path, opts, providers=["CPUExecutionProvider"])
self.eye_config: EyeTrackCameraConfig = eye_config
self.config: EyeTrackConfig = config
for i in range(self.num_threads):
thread = threading.Thread(
target=run_model,
args=(self.queues[i], self.output_queue, self.ort_session1),
name=f"Thread {i}",
)
self.threads.append(thread)
thread.start()
def leap_run(self):
img = self.current_image_gray_clean.copy()
img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
img_height, img_width = img.shape[:2]
frame = cv2.resize(img, (112, 112))
imgvis = self.current_image_gray.copy()
run_onnx_model(self.queues, self.ort_session1, frame)
if not self.output_queue.empty():
frame, pre_landmark = self.output_queue.get()
for point in pre_landmark:
x, y = point
x = int(x * img_width)
y = int(y * img_height)
cv2.circle(imgvis, (x, y), 3, (255, 255, 0), -1)
cv2.circle(imgvis, (x, y), 1, (0, 0, 255), -1)
d1 = math.dist(pre_landmark[1], pre_landmark[3])
d2 = math.dist(pre_landmark[2], pre_landmark[4])
d = (d1 + d2) / 2
if self.calib == 0:
self.openlist = []
self.eye_config.leap_calibrated = False
if not self.eye_config.leap_calibrated:
self.openlist.append(d)
self.eye_config.leap_calibration_percentile_90 = np.percentile(self.openlist, 90) if len(self.openlist) >= 10 else 0.8
self.eye_config.leap_calibration_percentile_2 = np.percentile(self.openlist, 2) - self.eye_config.leap_calibration_percentile_90
if len(self.openlist) >= self.config.settings.leap_calibration_samples:
self.eye_config.leap_calibrated = True
self.config.save()
print(f"[INFO] {'Left' if self.eye_config is self.config.left_eye else 'Right'} eye calibrated")
try:
if len(self.openlist) > 0 or self.eye_config.leap_calibrated:
per = (d - self.eye_config.leap_calibration_percentile_90) / self.eye_config.leap_calibration_percentile_2
per = 1 - per
per = np.clip(per, 0.0, 1.0)
else:
per = 0.8
except:
per = 0.8
x = pre_landmark[6][0]
y = pre_landmark[6][1]
self.last_lid = per
calib_array = np.array([per, per]).reshape(1, 2)
per = self.one_euro_filter_float(calib_array)[0][0]
if per <= 0.25:
per = 0.0
return imgvis, float(x*img_width), float(y*img_height), per
imgvis = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
return imgvis, 0, 0, 0
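# --- Illustrative sketch (not part of the upstream file): the openness calibration in numbers ---
# During calibration the 90th percentile of the eyelid distance d is taken as "fully open" and
# the 2nd percentile as "closed"; leap_calibration_percentile_2 stores the (negative) span
# p2 - p90, so at runtime per = 1 - (d - p90) / (p2 - p90), clipped to [0, 1].
# Hypothetical numbers: p90 = 0.30, p2 = 0.10 -> stored span = -0.20;
#     d = 0.20  ->  per = 1 - (0.20 - 0.30) / (-0.20) = 0.5 (half open)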
class External_Run_LEAP:
def __init__(self, eye_config: EyeTrackCameraConfig, config: EyeTrackConfig):
self.algo = LEAP_C(eye_config, config)
def run(self, current_image_gray, current_image_gray_clean, calib):
self.algo.current_image_gray = current_image_gray
self.algo.current_image_gray_clean = current_image_gray_clean
self.algo.calib = calib
img, x, y, per = self.algo.leap_run()
return img, x, y, per

View File

@@ -1,3 +1,6 @@
# https://github.com/jaantollander/OneEuroFilter
# LICENSE: MIT
import numpy as np
from time import time
@@ -20,17 +23,17 @@ class OneEuroFilter:
        self.beta = np.full(x0.shape, beta)
        self.d_cutoff = np.full(x0.shape, d_cutoff)
        # Previous values.
        self.x_prev = x0
        self.dx_prev = np.full(x0.shape, dx0)
        self.t_prev = time()

    def __call__(self, x):
        """Compute the filtered signal."""
        assert x.shape == self.data_shape
        try:
            t = time()
            t_e = t - self.t_prev
            if t_e != 0.0:  # occasionally when switching to HSF this becomes zero, causing divide-by-zero errors crashing the filter
                t_e = np.full(x.shape, t_e)
                # The filtered derivative of the signal.
@@ -49,3 +52,8 @@ class OneEuroFilter:
                self.t_prev = t
                return x_hat
            else:
                self.x_prev = x
                return x
        except:
            print("\033[91m[ERROR] One Euro Filter Error. Is your system clock running properly?\033[0m")

View File

@ -1,197 +0,0 @@
from pythonosc import udp_client
from pythonosc import osc_server
from pythonosc import dispatcher
from winsound import PlaySound, SND_FILENAME, SND_ASYNC
import queue
import threading
from enum import IntEnum
import time
class EyeId(IntEnum):
RIGHT = 0
LEFT = 1
BOTH = 2
SETTINGS = 3
from config import EyeTrackConfig
class VRChatOSC:
# Use a tuple of blink (true, blinking, false, not), x, y for now. Probably clearer as a class but
# we're stuck in python 3.6 so still no dataclasses. God I hate python.
def __init__(self, cancellation_event: threading.Event, msg_queue: queue.Queue[tuple[bool, int, int]], main_config: EyeTrackConfig,):
self.main_config = main_config
self.config = main_config.settings
self.client = udp_client.SimpleUDPClient(self.config.gui_osc_address, int(self.config.gui_osc_port)) # use OSC port and address that was set in the config
self.cancellation_event = cancellation_event
self.msg_queue = msg_queue
def run(self):
start = time.time()
last_blink = time.time()
yl = 621
yr = 621
sx = 0
sy = 0
se = 0
lec = 0
rec = 0
rb = False
lb = False
while True:
if self.cancellation_event.is_set():
print("Exiting OSC Queue")
return
try:
(eye_id, eye_info) = self.msg_queue.get(block=True, timeout=0.1)
except:
continue
if not eye_info.blink:
if self.config.tracker_single_eye == 1 or self.config.tracker_single_eye == 2:
self.client.send_message("/avatar/parameters/LeftEyeX", eye_info.x) # only one eye is detected or there is an error. Send mirrored data to both eyes.
self.client.send_message("/avatar/parameters/RightEyeX", eye_info.x)
self.client.send_message("/avatar/parameters/EyesY", eye_info.y)
self.client.send_message("/avatar/parameters/RightEyeLid", float(0))# old param open right
self.client.send_message("/avatar/parameters/RightEyeLidExpandedSqueeze", float(0.8)) # open r
self.client.send_message("/avatar/parameters/LeftEyeLid", float(0))# old param open left
self.client.send_message("/avatar/parameters/LeftEyeLidExpandedSqueeze", float(0.8)) # open left eye
if self.config.gui_blink_sync and not rb and not lb:
self.client.send_message("/avatar/parameters/RightEyeLid", float(0))# old param open right
self.client.send_message("/avatar/parameters/RightEyeLidExpandedSqueeze", float(0.8)) # open r
self.client.send_message("/avatar/parameters/LeftEyeLid", float(0))# old param open left
self.client.send_message("/avatar/parameters/LeftEyeLidExpandedSqueeze", float(0.8)) # open left eye
else:
if eye_id in [EyeId.RIGHT]:
yr = eye_info.y
sx = eye_info.x
sy = eye_info.y
rb = False
self.client.send_message("/avatar/parameters/RightEyeX", eye_info.x)
if not self.config.gui_blink_sync or self.config.gui_blink_sync and not lb:
self.client.send_message("/avatar/parameters/RightEyeLid", float(0))# old param open right
self.client.send_message("/avatar/parameters/RightEyeLidExpandedSqueeze", float(0.8)) # open right eye
if eye_id in [EyeId.LEFT]:
yl = eye_info.y
sx = eye_info.x
sy = eye_info.y
lb = False
self.client.send_message("/avatar/parameters/LeftEyeX", eye_info.x)
if not self.config.gui_blink_sync or self.config.gui_blink_sync and not rb:
self.client.send_message("/avatar/parameters/LeftEyeLid", float(0))# old param open left
self.client.send_message("/avatar/parameters/LeftEyeLidExpandedSqueeze", float(0.8)) # open left eye
if (yr != 621 and yl != 621) and (lb == False and rb == False):
y = (yr + yl) / 2
self.client.send_message("/avatar/parameters/EyesY", y)
else:
print(last_blink)
if self.config.gui_blink_sync:
if eye_id in [EyeId.LEFT]:
lb = True
if eye_id in [EyeId.RIGHT]:
rb = True
if rb == True and lb == True : # If both eyes are closed, blink
if last_blink > 0.5:
for i in range(4):
self.client.send_message("/avatar/parameters/RightEyeLid", float(1)) #close eye
self.client.send_message("/avatar/parameters/LeftEyeLid", float(1))
self.client.send_message("/avatar/parameters/RightEyeLidExpandedSqueeze", float(0)) # close eye
self.client.send_message("/avatar/parameters/LeftEyeLidExpandedSqueeze", float(0))
last_blink = time.time() - last_blink
else:
if self.config.tracker_single_eye == 1 or self.config.tracker_single_eye == 2:
if last_blink > 0.5:
for i in range(4):
self.client.send_message("/avatar/parameters/RightEyeLid", float(1)) #close eye
self.client.send_message("/avatar/parameters/LeftEyeLid", float(1))
self.client.send_message("/avatar/parameters/RightEyeLidExpandedSqueeze", float(0)) # close eye
self.client.send_message("/avatar/parameters/LeftEyeLidExpandedSqueeze", float(0))
last_blink = time.time() - last_blink
if not self.config.gui_eye_falloff:
if eye_id in [EyeId.LEFT]:
lb = True
if last_blink > 0.7:
for i in range(5):
self.client.send_message("/avatar/parameters/LeftEyeLid", float(1))
self.client.send_message("/avatar/parameters/LeftEyeLidExpandedSqueeze", float(0))
last_blink = time.time() - last_blink
if eye_id in [EyeId.RIGHT]:
rb = True
if last_blink > 0.7:
for i in range(5):
self.client.send_message("/avatar/parameters/RightEyeLid", float(1))
self.client.send_message("/avatar/parameters/RightEyeLidExpandedSqueeze", float(0)) # close eye
last_blink = time.time() - last_blink
else:
if eye_id in [EyeId.LEFT]:
lb = True
if eye_id in [EyeId.RIGHT]:
rb = True
if rb or lb: # If one eye closed and fall off is enabled, mirror data
self.client.send_message("/avatar/parameters/LeftEyeX", sx) #Send mirrored data to both eyes.
self.client.send_message("/avatar/parameters/RightEyeX", sx)
self.client.send_message("/avatar/parameters/EyesY", sy)
self.client.send_message("/avatar/parameters/RightEyeLid", float(0))# old param open right
self.client.send_message("/avatar/parameters/RightEyeLidExpandedSqueeze", float(0.8)) # open r
self.client.send_message("/avatar/parameters/LeftEyeLid", float(0))# old param open left
self.client.send_message("/avatar/parameters/LeftEyeLidExpandedSqueeze", float(0.8)) # open left eye
if rb and lb: # If both eyes are closed, blink
if last_blink > 0.5:
for i in range(4):
self.client.send_message("/avatar/parameters/RightEyeLid", float(1)) #close eye
self.client.send_message("/avatar/parameters/LeftEyeLid", float(1))
self.client.send_message("/avatar/parameters/RightEyeLidExpandedSqueeze", float(0)) # close eye
self.client.send_message("/avatar/parameters/LeftEyeLidExpandedSqueeze", float(0))
last_blink = time.time() - last_blink
class VRChatOSCReceiver:
def __init__(self, cancellation_event: threading.Event, main_config: EyeTrackConfig, eyes: list):
self.config = main_config.settings
self.cancellation_event = cancellation_event
self.dispatcher = dispatcher.Dispatcher()
self.eyes = eyes # CameraWidget can't be imported here, so the element type stays unannotated
try:
self.server = osc_server.OSCUDPServer((self.config.gui_osc_address, int(self.config.gui_osc_receiver_port)), self.dispatcher)
except OSError:
print(f"[ERROR] OSC Receive port {self.config.gui_osc_receiver_port} occupied.")
def shutdown(self):
print("Shutting down OSC receiver")
try:
self.server.shutdown()
except Exception:
pass
def recenter_eyes(self, address, osc_value):
if not isinstance(osc_value, bool): return # guard against anything other than a bool
if osc_value:
for eye in self.eyes:
eye.settings.gui_recenter_eyes = True
def recalibrate_eyes(self, address, osc_value):
if not isinstance(osc_value, bool): return # guard against anything other than a bool
if osc_value:
for eye in self.eyes:
eye.ransac.calibration_frame_counter = 300
PlaySound('Audio/start.wav', SND_FILENAME | SND_ASYNC)
def run(self):
# bind what function to run when specified OSC message is received
try:
self.dispatcher.map(self.config.gui_osc_recalibrate_address, self.recalibrate_eyes)
self.dispatcher.map(self.config.gui_osc_recenter_address, self.recenter_eyes)
# start the server
print("VRChatOSCReceiver serving on {}".format(self.server.server_address))
self.server.serve_forever()
except Exception:
print(f"[ERROR] OSC Receive port {self.config.gui_osc_receiver_port} occupied.")

View File

@ -0,0 +1,13 @@
import dataclasses
from enum import IntEnum
from typing import Any
class OSCMessageType(IntEnum):
EYE_INFO = 1
VRCFT_MODULE_INFO = 2
@dataclasses.dataclass
class OSCMessage:
type: OSCMessageType
data: Any
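A hypothetical construction of such a message, in the shape the senders later in this diff expect; SimpleNamespace stands in for the app's per-eye info object:

from types import SimpleNamespace
from osc.OSCMessage import OSCMessage, OSCMessageType

# EYE_INFO payloads are (eye_id, eye_info) tuples in the sender code
eye_info = SimpleNamespace(x=0.1, y=-0.2, blink=1.0, avg_velocity=0.0, pupil_dilation=0.5)
msg = OSCMessage(type=OSCMessageType.EYE_INFO, data=(1, eye_info))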

View File

@ -0,0 +1,17 @@
from pythonosc.udp_client import SimpleUDPClient
from osc.OSCMessage import OSCMessage
class VRCFTModuleSender:
set_command_pattern = "/command/{}/{}/"
def send(self, osc_message: OSCMessage, client: SimpleUDPClient):
command = osc_message.data.get("command", None)
field_to_send = osc_message.data.get("field", None)
value_to_send = osc_message.data.get("value", None)
if not command or not field_to_send or value_to_send is None:
print("[ERROR] Misconfiguration in received OSC message for the VRCFT Module")
return
client.send_message(self.set_command_pattern.format(command, field_to_send), value_to_send)
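A sketch of driving this sender directly; the port and field name here are assumptions, but the command/field/value payload shape matches what send() unpacks above:

from pythonosc.udp_client import SimpleUDPClient
from osc.OSCMessage import OSCMessage, OSCMessageType
from osc.VRCFTModuleMessenger import VRCFTModuleSender

client = SimpleUDPClient("127.0.0.1", 8889)  # assumed VRCFT module port
message = OSCMessage(
    type=OSCMessageType.VRCFT_MODULE_INFO,
    data={"command": "set", "field": "gui_osc_port", "value": 9000},  # hypothetical field
)
VRCFTModuleSender().send(message, client)  # emits /command/set/gui_osc_port/ 9000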

View File

@ -0,0 +1,358 @@
from pythonosc.udp_client import SimpleUDPClient
from eye import EyeId
from osc.OSCMessage import OSCMessage
from config import EyeTrackConfig, EyeTrackSettingsConfig
from enum import IntEnum
import time
def _eyelid_transformer(config, eye_blink):
if config.osc_invert_eye_close:
return float(1 - eye_blink)
else:
return float(eye_blink)
class OutputType(IntEnum):
V1_PARAMS = 1
V2_PARAMS = 2
NATIVE_PARAMS = 3
class VRChatOSCSender:
def __init__(self):
self.is_single_eye = False
self.falloff_enabled = False
self.left_y = 621
self.right_y = 621
self.r_eye_x = 0
self.l_eye_x = 0
self.r_eye_blink = 0.7
self.l_eye_blink = 0.7
self.l_eye_velocity = 0
self.r_eye_velocity = 1
self.left_last_blink = time.time()
self.right_last_blink = time.time()
self.r_dilation = 0
self.l_dilation = 0
def output_osc_info(
self,
osc_message: OSCMessage,
client: SimpleUDPClient,
main_config: EyeTrackConfig,
config: EyeTrackSettingsConfig,
):
eye_id, eye_info = osc_message.data
self.is_single_eye = self.get_is_single_eye(main_config.eye_display_id)
output_method = None
if config.gui_vrc_native:
output_method = self.output_native
if config.gui_osc_vrcft_v1:
output_method = self.output_v1_params
if config.gui_osc_vrcft_v2:
output_method = self.output_v2_params
if output_method:
output_method(
main_config=main_config,
config=config,
client=client,
eye_x=eye_info.x,
eye_y=eye_info.y,
eye_blink=eye_info.blink,
avg_velocity=eye_info.avg_velocity,
eye_id=eye_id,
pupil_dilation=eye_info.pupil_dilation,
)
@staticmethod
def get_is_single_eye(eye_display_id):
return eye_display_id in [EyeId.RIGHT, EyeId.LEFT, 0, 1]
def update_eye_state(self, eye_id, eye_x, eye_y, eye_blink, avg_velocity, pupil_dilation):
if eye_id == EyeId.LEFT:
self.l_eye_x = eye_x
self.l_eye_blink = eye_blink
self.left_y = eye_y
self.l_eye_velocity = avg_velocity
self.l_dilation = pupil_dilation
if eye_id == EyeId.RIGHT:
self.r_eye_x = eye_x
self.r_eye_blink = eye_blink
self.right_y = eye_y
self.r_eye_velocity = avg_velocity
self.r_dilation = pupil_dilation
def output_native(self, main_config, config, client, eye_x, eye_y, eye_blink, avg_velocity, eye_id, pupil_dilation):
default_eye_blink_params = {
"eye_id": eye_id,
"client": client,
"config": config,
}
self.update_eye_state(
eye_id=eye_id,
eye_x=eye_x,
eye_y=eye_y,
eye_blink=eye_blink,
avg_velocity=avg_velocity,
pupil_dilation=pupil_dilation,
)
if self.is_single_eye:
self.output_osc_native_blink(
**default_eye_blink_params,
)
client.send_message(
"/tracking/eye/LeftRightVec",
[float(eye_x), float(eye_y), 1.0, float(eye_x), float(eye_y), 1.0],
)
if eye_id in [EyeId.LEFT, EyeId.RIGHT, EyeId.BOTH] and not self.is_single_eye:
self.output_osc_native_blink(**default_eye_blink_params, single_eye_mode=False)
if not self.is_single_eye:
# vrc native ET (z values may need tweaking, they act like a scalar)
client.send_message(
"/tracking/eye/LeftRightVec",
[
float(self.l_eye_x),
float(self.left_y),
1.0,
float(self.r_eye_x),
float(self.right_y),
1.0,
],
)
def output_v1_params(
self,
main_config,
config,
client,
eye_x,
eye_y,
eye_blink,
avg_velocity,
eye_id,
pupil_dilation,
):
default_eye_blink_params = {
"eye_id": eye_id,
"client": client,
"config": config,
"left_eye_blink_address": config.osc_left_eye_close_address,
"right_eye_blink_address": config.osc_right_eye_close_address,
}
self.update_eye_state(
eye_id=eye_id,
eye_x=eye_x,
eye_y=eye_y,
eye_blink=eye_blink,
avg_velocity=avg_velocity,
pupil_dilation=pupil_dilation,
)
if self.is_single_eye:
client.send_message(config.osc_left_eye_x_address, eye_x)
client.send_message(config.osc_right_eye_x_address, eye_x)
client.send_message(config.osc_eyes_y_address, eye_y)
client.send_message(config.osc_eyes_pupil_dilation_address, pupil_dilation)
self.output_vrcft_blink_data(**default_eye_blink_params)
if eye_id in [EyeId.LEFT, EyeId.RIGHT] and not self.is_single_eye:
self.output_vrcft_blink_data(**default_eye_blink_params, single_eye_mode=False)
if eye_id == EyeId.LEFT:
client.send_message(config.osc_left_eye_x_address, self.l_eye_x)
self.left_y = eye_y
self.l_dilation = pupil_dilation
client.send_message(
config.osc_left_eye_close_address,
_eyelid_transformer(config, self.l_eye_blink),
)
if eye_id == EyeId.RIGHT:
client.send_message(config.osc_right_eye_x_address, self.r_eye_x)
self.right_y = eye_y
self.r_dilation = pupil_dilation
client.send_message(
config.osc_right_eye_close_address,
_eyelid_transformer(config, self.r_eye_blink),
)
if main_config.eye_display_id == EyeId.BOTH and self.right_y != 621 and self.left_y != 621:
y = (self.right_y + self.left_y) / 2
client.send_message(config.osc_eyes_y_address, y)
avg_dilation = (self.r_dilation + self.l_dilation) / 2 # simple average of both eyes; unverified heuristic
client.send_message(config.osc_eyes_pupil_dilation_address, avg_dilation) # single param for both eyes.
def output_v2_params(
self,
main_config,
config,
client,
eye_x,
eye_y,
eye_blink,
avg_velocity,
eye_id,
pupil_dilation,
):
default_eye_blink_params = {
"eye_id": eye_id,
"client": client,
"config": config,
}
self.update_eye_state(
eye_id=eye_id,
eye_x=eye_x,
eye_y=eye_y,
eye_blink=eye_blink,
avg_velocity=avg_velocity,
pupil_dilation=pupil_dilation,
)
if self.is_single_eye:
client.send_message("/avatar/parameters/v2/EyeX", eye_x)
client.send_message("/avatar/parameters/v2/EyeY", eye_y)
client.send_message("/avatar/parameters/v2/PupilDilation", pupil_dilation)
self.output_vrcft_blink_data(
**default_eye_blink_params,
left_eye_blink_address="/avatar/parameters/v2/EyeLid",
right_eye_blink_address="/avatar/parameters/v2/EyeLid",
)
if eye_id in [EyeId.LEFT, EyeId.RIGHT] and not self.is_single_eye:
self.output_vrcft_blink_data(
**default_eye_blink_params,
left_eye_blink_address="/avatar/parameters/v2/EyeLidLeft",
right_eye_blink_address="/avatar/parameters/v2/EyeLidRight",
single_eye_mode=False,
)
if eye_id == EyeId.LEFT:
self.l_dilation = pupil_dilation
client.send_message("/avatar/parameters/v2/EyeLeftX", self.l_eye_x)
if self.left_y != 621:
client.send_message("/avatar/parameters/v2/EyeLeftY", eye_y)
client.send_message(
"/avatar/parameters/v2/EyeLidLeft",
_eyelid_transformer(config, self.l_eye_blink),
)
if eye_id == EyeId.RIGHT:
self.r_dilation = pupil_dilation
client.send_message("/avatar/parameters/v2/EyeRightX", self.r_eye_x)
if eye_y != 621:
client.send_message("/avatar/parameters/v2/EyeRightY", eye_y)
client.send_message(
"/avatar/parameters/v2/EyeLidRight",
_eyelid_transformer(config, self.r_eye_blink),
)
avg_pupil_dilation = (self.l_dilation + self.r_dilation) / 2
client.send_message("/avatar/parameters/v2/PupilDilation", avg_pupil_dilation)
def output_vrcft_blink_data(
self,
eye_id: EyeId,
client: SimpleUDPClient,
config,
left_eye_blink_address,
right_eye_blink_address,
single_eye_mode=True,
):
active_eye_blink = self.r_eye_blink if eye_id == EyeId.RIGHT else self.l_eye_blink
falloff_blink = self.r_eye_blink if eye_id == EyeId.LEFT else self.l_eye_blink
blink_address = right_eye_blink_address if eye_id == EyeId.RIGHT else left_eye_blink_address
side_name = "left" if eye_id == EyeId.RIGHT else "right"
last_side_blink = getattr(self, f"{side_name}_last_blink")
if single_eye_mode:
# for v1 params we have to send the same data to each eye separately,
# so for v2 params this generates one redundant extra call
client.send_message(left_eye_blink_address, _eyelid_transformer(config, active_eye_blink))
client.send_message(right_eye_blink_address, _eyelid_transformer(config, active_eye_blink))
elif eye_id in [EyeId.RIGHT, EyeId.LEFT] and not single_eye_mode:
if active_eye_blink == 0.0:
if last_side_blink > 0.20:
for _ in range(5):
client.send_message(blink_address, _eyelid_transformer(config, active_eye_blink))
setattr(self, f"{side_name}_last_blink", time.time() - last_side_blink)
if config.gui_outer_side_falloff:
if falloff_blink == 0.0:
client.send_message(left_eye_blink_address, _eyelid_transformer(config, self.l_eye_blink))
client.send_message(right_eye_blink_address, _eyelid_transformer(config, self.r_eye_blink))
client.send_message(blink_address, _eyelid_transformer(config, active_eye_blink))
def output_osc_native_blink(
self,
eye_id: EyeId,
client,
config,
single_eye_mode=True,
):
blink_address = "/tracking/eye/EyesClosedAmount"
active_eye_blink = self.r_eye_blink if eye_id == EyeId.RIGHT else self.l_eye_blink
falloff_blink = self.r_eye_blink if eye_id == EyeId.LEFT else self.l_eye_blink
side_name = "left" if eye_id == EyeId.RIGHT else "right"
last_side_blink = getattr(self, f"{side_name}_last_blink")
def send_native_binary_blink(address: str, blink_value):
if last_side_blink > 0.2:
for _ in range(5):
client.send_message(address, float(1 - blink_value))
setattr(self, f"{side_name}_last_blink", time.time() - last_side_blink)
if single_eye_mode:
if active_eye_blink == 0.0:
send_native_binary_blink(blink_address, active_eye_blink)
else:
client.send_message(blink_address, float(1 - active_eye_blink))
if eye_id in [EyeId.RIGHT, EyeId.LEFT] and not single_eye_mode:
# in dual eye mode we need to average the blink to prevent flickering.
# VRC also **currently** doesn't support separate eyelids, so it's fine
if self.r_eye_blink or self.l_eye_blink:
averaged_eye_blink = (self.r_eye_blink + self.l_eye_blink) / 2
else:
averaged_eye_blink = 0
client.send_message(
blink_address,
_eyelid_transformer(config, 1 - averaged_eye_blink),
)
if averaged_eye_blink == 0.0:
send_native_binary_blink(blink_address, averaged_eye_blink)
if config.gui_outer_side_falloff:
if falloff_blink == 0.0:
client.send_message(blink_address, float(1 - averaged_eye_blink))
if eye_id == EyeId.BOTH and self.r_eye_blink != 621 and self.l_eye_blink != 621:
if self.r_eye_blink == 0.0 or self.l_eye_blink == 0.0:
send_native_binary_blink(blink_address, active_eye_blink)
# this has a nasty habit of permanent-squint FIXME
averaged_eye_blink = (self.r_eye_blink + self.l_eye_blink) / 2
client.send_message(blink_address, float(1 - averaged_eye_blink))
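A minimal driving sketch for this sender; the SimpleNamespace objects are stand-ins for the app's real config and eye-info types, carrying only the fields this code path reads:

from types import SimpleNamespace
from pythonosc.udp_client import SimpleUDPClient
from osc.OSCMessage import OSCMessage, OSCMessageType
from osc.VRChatOSCSender import VRChatOSCSender
from eye import EyeId

settings = SimpleNamespace(gui_vrc_native=True, gui_osc_vrcft_v1=False, gui_osc_vrcft_v2=False,
                           gui_outer_side_falloff=False, osc_invert_eye_close=False)
main_config = SimpleNamespace(eye_display_id=EyeId.BOTH, settings=settings)
eye_info = SimpleNamespace(x=0.1, y=-0.2, blink=1.0, avg_velocity=0.0, pupil_dilation=0.5)

sender = VRChatOSCSender()
sender.output_osc_info(
    OSCMessage(type=OSCMessageType.EYE_INFO, data=(EyeId.LEFT, eye_info)),
    SimpleUDPClient("127.0.0.1", 9000),  # assumed VRChat OSC port
    main_config,
    settings,
)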

View File

215
EyeTrackApp/osc/osc.py Normal file
View File

@ -0,0 +1,215 @@
"""
------------------------------------------------------------------------------------------------------
,@@@@@@
@@@@@@@@@@@ @@@
@@@@@@@@@@@@ @@@@@@@@@@@
@@@@@@@@@@@@@ @@@@@@@@@@@@@@
@@@@@@@/ ,@@@@@@@@@@@@@
/@@@@@@@@@@@@@@@ @@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@ @@@@@
@@@@@@@@ @@@@@
,@@@ @@@@&
@@@@@@. @@@@
@@@ @@@@@@@@@/ @@@@@
,@@@. @@@@@@((@ @@@@(
//@@@ ,, @@@@ @@@@@
@@@( @@@@@@@
@@@ @ @@@@@@@@#
@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@(
Copyright (c) 2025 EyeTrackVR <3
LICENSE: Babble Software Distribution License 1.0
------------------------------------------------------------------------------------------------------
"""
from time import sleep
from typing import Dict, Optional, Iterable, Callable
from pythonosc import udp_client
from pythonosc import osc_server
from pythonosc import dispatcher
from config import EyeTrackConfig
from osc.OSCMessage import OSCMessage, OSCMessageType
from osc.VRCFTModuleMessenger import VRCFTModuleSender
from osc.VRChatOSCSender import VRChatOSCSender
import queue
import threading
class OSCManager:
def __init__(
self,
osc_message_in_queue: queue.Queue[OSCMessage],
config: EyeTrackConfig,
):
self.sender_cancellation_event = threading.Event()
self.receiver_cancellation_event = threading.Event()
self.listeners = {}
self.osc_message_in_queue = osc_message_in_queue
self.config = config
self.settings = config.settings
self.osc_sender: Optional[OSCSender] = None
self.osc_receiver = None
self.osc_sender_thread: Optional[threading.Thread] = None
self.osc_receiver_thread: Optional[threading.Thread] = None
def start(self):
self.setup_sender()
self.setup_receiver()
def setup_sender(self):
print(f"\033[92m[INFO] Setting up OSC sender\033[0m")
self.sender_cancellation_event.clear()
self.osc_sender = OSCSender(self.sender_cancellation_event, self.osc_message_in_queue, self.config)
self.osc_sender_thread = threading.Thread(target=self.osc_sender.run)
self.osc_sender_thread.start()
def setup_receiver(self):
if self.settings.gui_ROSC:
self.receiver_cancellation_event.clear()
print(f"\033[92m[INFO] Setting up OSC receiver\033[0m")
self.osc_receiver = OSCReceiver(self.receiver_cancellation_event, self.config, self.listeners)
self.osc_receiver_thread = threading.Thread(target=self.osc_receiver.run)
self.osc_receiver_thread.start()
def register_listeners(self, osc_address: str, callbacks: Iterable[Callable]):
if not self.listeners.get(osc_address):
self.listeners[osc_address] = []
self.listeners[osc_address].extend(callbacks)
def update(self, data: dict):
keys = set(data.keys())
sender_trigger_keys = {
"gui_osc_port",
"gui_VRCFTModulePort",
"gui_VRCFTModuleIPAddress",
"gui_use_module",
}
if sender_trigger_keys.intersection(keys):
self.stop_sender()
self.setup_sender()
receiver_trigger_keys = {
"gui_ROSC",
"gui_osc_receiver_port",
}
if receiver_trigger_keys.intersection(keys):
self.stop_receiver()
self.setup_receiver()
def shutdown(self):
self.stop_sender()
self.stop_receiver()
def stop_sender(self):
if self.osc_sender_thread:
self.sender_cancellation_event.set()
self.osc_sender_thread.join()
def stop_receiver(self):
if self.osc_receiver_thread:
self.receiver_cancellation_event.set()
self.osc_receiver_thread.join()
class OSCSender:
def __init__(
self,
cancellation_event: threading.Event,
msg_queue: queue.Queue[OSCMessage],
main_config: EyeTrackConfig,
):
self.cancellation_event = cancellation_event
self.msg_queue = msg_queue
self.main_config = main_config
self.config = main_config.settings
self.vrc_sender = VRChatOSCSender()
self.module_sender = VRCFTModuleSender()
self.vrc_client = None
self.vrcft_client = None
def run(self):
self.vrc_client = udp_client.SimpleUDPClient(self.config.gui_osc_address, int(self.config.gui_osc_port))
self.vrcft_client = udp_client.SimpleUDPClient(
self.config.gui_VRCFTModuleIPAddress,
int(self.config.gui_VRCFTModulePort),
)
vrc_osc_output_client = self.vrc_client
if self.config.gui_use_module:
vrc_osc_output_client = self.vrcft_client
while not self.cancellation_event.is_set():
try:
osc_message: OSCMessage = self.msg_queue.get(block=True, timeout=0.1)
match osc_message.type:
case OSCMessageType.EYE_INFO:
self.vrc_sender.output_osc_info(
osc_message=osc_message,
client=vrc_osc_output_client,
main_config=self.main_config,
config=self.config,
)
case OSCMessageType.VRCFT_MODULE_INFO:
self.module_sender.send(osc_message=osc_message, client=self.vrcft_client)
case _:
raise Exception(f"Encountered message without a handler: {osc_message.type}")
except TypeError:
continue
except queue.Empty:
continue
class OSCReceiver:
def __init__(
self,
cancellation_event: threading.Event,
main_config: EyeTrackConfig,
listeners: Dict[str, Callable[[OSCMessage], None]],
):
self.config = main_config.settings
self.cancellation_event = cancellation_event
self.dispatcher = dispatcher.Dispatcher()
self.listeners = listeners
self.server_thread = None
try:
# The OSC UDP server has no way of shutting down cleanly unless serve_forever
# runs in its own thread, and even after shutdown() it can stay bound to the port.
self.server = osc_server.OSCUDPServer(
(self.config.gui_osc_address, int(self.config.gui_osc_receiver_port)),
self.dispatcher,
)
except Exception: # noqa
print(f"\033[91m[ERROR] OSC Receive port: {self.config.gui_osc_receiver_port} occupied.\033[0m")
def shutdown(self):
print("\033[94m[INFO] Exiting OSC Receiver\033[0m")
try:
self.server.shutdown()
self.server_thread.join()
except Exception: # noqa
pass
def handle_osc_message(self, address, value):
for listener in self.listeners.get(address, []):
listener(OSCMessage(type=OSCMessageType.EYE_INFO, data=value))
def run(self):
try:
self.dispatcher.set_default_handler(self.handle_osc_message)
print("\033[92m[INFO] OSC Listening on {}\033[0m".format(self.server.server_address))
self.server_thread = threading.Thread(target=self.server.serve_forever)
self.server_thread.start()
while not self.cancellation_event.is_set():
sleep(10)
self.shutdown()
except Exception: # noqa
print(f"\033[91m[ERROR] OSC Receive port: {self.config.gui_osc_receiver_port} occupied.\033[0m")

View File

@ -0,0 +1,355 @@
"""
------------------------------------------------------------------------------------------------------
,@@@@@@
@@@@@@@@@@@ @@@
@@@@@@@@@@@@ @@@@@@@@@@@
@@@@@@@@@@@@@ @@@@@@@@@@@@@@
@@@@@@@/ ,@@@@@@@@@@@@@
/@@@@@@@@@@@@@@@ @@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@ @@@@@
@@@@@@@@ @@@@@
,@@@ @@@@&
@@@@@@. @@@@
@@@ @@@@@@@@@/ @@@@@
,@@@. @@@@@@((@ @@@@(
//@@@ ,, @@@@ @@@@@
@@@( @@@@@@@
@@@ @ @@@@@@@@#
@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@(
Copyright (c) 2023 EyeTrackVR <3
LICENSE: Babble Software Distribution License 1.0
------------------------------------------------------------------------------------------------------
"""
import numpy as np
import time
from enum import IntEnum
from utils.misc_utils import PlaySound, SND_FILENAME, SND_ASYNC, resource_path
from utils.eye_falloff import velocity_falloff
import socket
import struct
import threading
import os
import subprocess
import math
from utils.calibration_3d import receive_calibration_data, converge_3d
from utils.misc_utils import resource_path
from pathlib import Path
tool = Path("Tools")
class TimeoutError(RuntimeError):
pass
class AsyncCall(object):
def __init__(self, fnc, callback=None):
self.Callable = fnc
self.Callback = callback
def __call__(self, *args, **kwargs):
self.Thread = threading.Thread(target=self.run, name=self.Callable.__name__, args=args, kwargs=kwargs)
self.Thread.start()
return self
def wait(self, timeout=None):
self.Thread.join(timeout)
if self.Thread.is_alive(): # Thread.isAlive() was removed in Python 3.9
raise TimeoutError()
else:
return self.Result
def run(self, *args, **kwargs):
self.Result = self.Callable(*args, **kwargs)
if self.Callback:
self.Callback(self.Result)
class AsyncMethod(object):
def __init__(self, fnc, callback=None):
self.Callable = fnc
self.Callback = callback
def __call__(self, *args, **kwargs):
return AsyncCall(self.Callable, self.Callback)(*args, **kwargs)
def Async(fnc=None, callback=None):
if fnc is None:
def AddAsyncCallback(fnc):
return AsyncMethod(fnc, callback)
return AddAsyncCallback
else:
return AsyncMethod(fnc, callback)
class EyeId(IntEnum):
RIGHT = 0
LEFT = 1
BOTH = 2
SETTINGS = 3
class var:
average_velocity = 0
velocity_rolling_list = []
past_x = 0
past_y = 0
start_time = time.time()
r_eye_x = 0.0
l_eye_x = 0.0
left_y = 0.0
right_y = 0.0
l_eye_velocity = 0.0
r_eye_velocity = 0.0
overlay_active = False
falloff_latch = False
single_eye = True
left_enb = 0
right_enb = 0
eye_wait = 10
left_calib = False
right_calib = False
completed_3d_calib = 0
@Async
def center_overlay_calibrate(self):
tools = Path("Tools")
# try:
if not var.overlay_active:
overlay_path = resource_path("tools/ETVR_SteamVR_Calibration_Overlay.exe")
os.startfile(overlay_path, arguments="center")
var.overlay_active = True
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server_address = ("localhost", 2112)
sock.bind(server_address)
data, address = sock.recvfrom(4096)
received_int = struct.unpack("!l", data)[0]
message = received_int
self.settings.gui_recenter_eyes = False
self.calibration_frame_counter = 0
var.overlay_active = False
# except:
# print("[WARN] Calibration overlay error. Make sure SteamVR is Running.")
# self.settings.gui_recenter_eyes = False
# var.overlay_active = False
@Async
def overlay_calibrate_3d(self):
try:
if not var.overlay_active:
overlay_path = resource_path("tools/EyeTrackVR-Overlay.exe")
os.startfile(overlay_path)
var.overlay_active = True
while var.overlay_active:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server_address = ("localhost", 2112)
sock.bind(server_address)
data, address = sock.recvfrom(4096)
received_int = struct.unpack("!l", data)[0]
message = received_int
self.settings.gui_recenter_eyes = False
self.settings.grab_3d_point = True
print(message)
if message == 9:
var.overlay_active = False
except:
print("[WARN] Calibration overlay error. Make sure SteamVR is Running.")
self.settings.gui_recenter_eyes = False
var.overlay_active = False
class cal:
def cal_osc(self, cx, cy, angle):
# print(self.eye_id)
if cx is None or cy is None:
return 0, 0
if cx == 0:
cx = 1
if cy == 0:
cy = 1
if self.eye_id == EyeId.RIGHT:
flipx = self.settings.gui_flip_x_axis_right
else:
flipx = self.settings.gui_flip_x_axis_left
if self.calibration_3d_frame_counter == -621: # or self.settings.gui_3d_calibration:
self.calibration_3d_frame_counter = self.calibration_3d_frame_counter - 1
overlay_calibrate_3d(self)
self.config.calibration_points_3d = []
# print(self.eye_id, cx, cy)
# self.settings.gui_3d_calibration = False
if self.settings.grab_3d_point:
# Check if both calibrations are done
if var.left_calib and var.right_calib:
self.settings.grab_3d_point = False
var.left_calib = False
var.right_calib = False
print("end", len(self.config.calibration_points_3d), self.config.calibration_points_3d)
else:
# Check if it's the left eye and left calibration is not done yet
if self.eye_id == EyeId.LEFT and not var.left_calib:
var.left_calib = True
self.config.calibration_points_3d.append((cx, cy, 1))
# Check if it's the right eye and right calibration is not done yet
elif self.eye_id == EyeId.RIGHT and not var.right_calib:
var.right_calib = True
self.config.calibration_points_3d.append((cx, cy, 0))
if self.eye_id == EyeId.LEFT and len(self.config.calibration_points_3d) == 9 and not var.left_calib:
var.left_calib = True
receive_calibration_data(self.config.calibration_points_3d, self.eye_id)
print("SENT LEFT EYE POINTS")
var.completed_3d_calib += 1
if self.eye_id == EyeId.RIGHT and len(self.config.calibration_points_3d) == 9 and not var.right_calib:
var.right_calib = True
receive_calibration_data(self.config.calibration_points_3d, self.eye_id)
print("SENT RIGHT EYE POINTS")
var.completed_3d_calib += 1
# print(len(self.config.calibration_points), self.eye_id)
if var.completed_3d_calib >= 2:
converge_3d()
# pass
if self.calibration_frame_counter == 0:
self.calibration_frame_counter = None
self.config.calib_XOFF = cx
self.config.calib_YOFF = cy
self.baseconfig.save()
PlaySound(resource_path("Audio/completed.wav"), SND_FILENAME | SND_ASYNC)
if self.calibration_frame_counter == self.settings.calibration_samples:
self.config.calib_XMAX = -69420
self.config.calib_XMIN = 69420
self.config.calib_YMAX = -69420
self.config.calib_YMIN = 69420
self.blink_clear = True
self.calibration_frame_counter -= 1
elif self.calibration_frame_counter is not None:
self.blink_clear = False
self.settings.gui_recenter_eyes = False
if cx > self.config.calib_XMAX:
self.config.calib_XMAX = cx
if cx < self.config.calib_XMIN:
self.config.calib_XMIN = cx
if cy > self.config.calib_YMAX:
self.config.calib_YMAX = cy
if cy < self.config.calib_YMIN:
self.config.calib_YMIN = cy
self.calibration_frame_counter -= 1
if self.settings.gui_recenter_eyes:
self.config.calib_XOFF = cx
self.config.calib_YOFF = cy
if self.ts == 0:
center_overlay_calibrate(self) # TODO, only call on windows machines?
self.settings.gui_recenter_eyes = False
PlaySound(resource_path("Audio/completed.wav"), SND_FILENAME | SND_ASYNC)
else:
self.ts = self.ts - 1
else:
self.ts = 10
out_x = 0.5
out_y = 0.5
if self.config.calib_XMAX is not None and self.config.calib_XOFF is not None:
calib_diff_x_MAX = self.config.calib_XMAX - self.config.calib_XOFF
if calib_diff_x_MAX == 0:
calib_diff_x_MAX = 1
calib_diff_x_MIN = self.config.calib_XMIN - self.config.calib_XOFF
if calib_diff_x_MIN == 0:
calib_diff_x_MIN = 1
calib_diff_y_MAX = self.config.calib_YMAX - self.config.calib_YOFF
if calib_diff_y_MAX == 0:
calib_diff_y_MAX = 1
calib_diff_y_MIN = self.config.calib_YMIN - self.config.calib_YOFF
if calib_diff_y_MIN == 0:
calib_diff_y_MIN = 1
xl = float((cx - self.config.calib_XOFF) / calib_diff_x_MAX)
xr = float((cx - self.config.calib_XOFF) / calib_diff_x_MIN)
yu = float((cy - self.config.calib_YOFF) / calib_diff_y_MIN)
yd = float((cy - self.config.calib_YOFF) / calib_diff_y_MAX)
if self.settings.gui_flip_y_axis: # check config on flipped values settings and apply accordingly
if yd >= 0:
out_y = max(0.0, min(1.0, yd))
if yu > 0:
out_y = -abs(max(0.0, min(1.0, yu)))
else:
if yd >= 0:
out_y = -abs(max(0.0, min(1.0, yd)))
if yu > 0:
out_y = max(0.0, min(1.0, yu))
if flipx:
if xr >= 0:
out_x = -abs(max(0.0, min(1.0, xr)))
if xl > 0:
out_x = max(0.0, min(1.0, xl))
else:
if xr >= 0:
out_x = max(0.0, min(1.0, xr))
if xl > 0:
out_x = -abs(max(0.0, min(1.0, xl)))
if self.settings.gui_outer_side_falloff:
run_time = time.time()
out_x_mult = out_x * 100
out_y_mult = out_y * 100
velocity = abs(
np.sqrt(abs(np.square(out_x_mult - var.past_x) - np.square(out_y_mult - var.past_y)))
/ ((var.start_time - run_time) * 10)
)
if len(var.velocity_rolling_list) < 15:
var.velocity_rolling_list.append(float(velocity))
else:
var.velocity_rolling_list.pop(0)
var.velocity_rolling_list.append(float(velocity))
var.average_velocity = sum(var.velocity_rolling_list) / len(var.velocity_rolling_list)
var.past_x = out_x_mult
var.past_y = out_y_mult
out_x, out_y = velocity_falloff(self, var, out_x, out_y)
try:
noisy_point = np.array([float(out_x), float(out_y)]) # filter our values with a One Euro Filter
point_hat = self.one_euro_filter(noisy_point)
out_x = point_hat[0]
out_y = point_hat[1]
except Exception:
pass
return out_x, out_y, var.average_velocity
else:
if self.printcal:
print("\033[91m[ERROR] Please Calibrate Eye(s).\033[0m")
self.printcal = False
return 0, 0, 0
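A worked pass through the normalization above, with hypothetical calibration numbers: take calib_XOFF = 50 (the recentered origin), calib_XMIN = 20, calib_XMAX = 80, and a raw pupil position cx = 65. Then calib_diff_x_MAX = 30 and calib_diff_x_MIN = -30, so xl = (65 - 50) / 30 = 0.5 and xr = (65 - 50) / -30 = -0.5. With flipx off, xr is negative so only the xl branch fires, giving out_x = -abs(0.5) = -0.5: halfway toward the calib_XMAX edge of the calibrated range, before the falloff and One Euro filtering steps are applied.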

483
EyeTrackApp/ransac.py Normal file
View File

@ -0,0 +1,483 @@
"""
------------------------------------------------------------------------------------------------------
,@@@@@@
@@@@@@@@@@@ @@@
@@@@@@@@@@@@ @@@@@@@@@@@
@@@@@@@@@@@@@ @@@@@@@@@@@@@@
@@@@@@@/ ,@@@@@@@@@@@@@
/@@@@@@@@@@@@@@@ @@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@ @@@@@
@@@@@@@@ @@@@@
,@@@ @@@@&
@@@@@@. @@@@
@@@ @@@@@@@@@/ @@@@@
,@@@. @@@@@@((@ @@@@(
//@@@ ,, @@@@ @@@@@
@@@( @@@@@@@
@@@ @ @@@@@@@@#
@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@(
RANSAC 3D By: Summer#2406 (Main Algorithm Engineer), Pupil Labs (pye3d), PallasNeko (Optimization)
Algorithm App Implementations By: Prohurtz, qdot (Initial App Creator)
Copyright (c) 2025 EyeTrackVR <3
LICENSE: Summer Software Distribution License 1.0
------------------------------------------------------------------------------------------------------
"""
import cv2
import numpy as np
from eye import EyeId
from utils.img_utils import safe_crop
from utils.misc_utils import clamp
import os
import psutil
import sys
process = psutil.Process(os.getpid()) # set process priority to low
try: # possibly a no-op, but detects Windows via sys.getwindowsversion
sys.getwindowsversion()
except AttributeError:
process.nice(0) # UNIX niceness: 0 is the default; higher values mean lower priority
process.nice()
else:
process.nice(psutil.BELOW_NORMAL_PRIORITY_CLASS) # Windows
process.nice()
def ellipse_model(data, y, f):
"""
There is no need to make this process a function, since making the process a function will slow it down a little by calling it.
The results may be slightly different from the lambda version due to calculation errors derived from float types, but the calculation results are virtually the same.
a = 1.0,b = P[0],c = P[1],d = P[2],e = P[3],f = P[4]
:param data:
:param y: np.c_[d, e, a, c, b]
:param f: f == P[4, 0]
:return: this_return == np.array([ellipse_model(x, y) for (x, y) in data ])
"""
return data.dot(y) + f
# @profile
def fit_rotated_ellipse_ransac(
data: np.ndarray,
rng: np.random.Generator,
iterations=45,
sample_num=10,
offset=80, # 80.0, 10, 80
): # before changing these values, please read up on the ransac algorithm
# However if you want to change any value just know that higher iterations will make processing frames slower
effective_sample = None
# The array contents do not change during the loop, so only one call is needed.
# They say len is faster than shape.
# Reference url: https://stackoverflow.com/questions/35547853/what-is-faster-python3s-len-or-numpys-shape
len_data = len(data)
if len_data < sample_num:
return None
# Type of calculation result
ret_dtype = np.float64
# Sort a random array of shape (iterations, len_data) and keep each row's first
# sample_num pre-sort indices, i.e. sample_num random indices without replacement.
# For arrays smaller than about 100 elements this is faster than rng.choice.
rng_sample = rng.random((iterations, len_data)).argsort()[:, :sample_num]
# Alternative with an explicit dtype (no clear advantage):
# rng_sample = np.asarray(rng.random((iterations, len_data)).argsort()[:, :sample_num], dtype=np.int32)
# x,y,x**2,y**2,x*y,1,-1*x**2
datamod = np.concatenate(
[
data,
data**2,
(data[:, 0] * data[:, 1])[:, np.newaxis],
np.ones((len_data, 1), dtype=ret_dtype),
(-1 * data[:, 0] ** 2)[:, np.newaxis],
],
axis=1,
dtype=ret_dtype,
)
datamod_slim = np.array(datamod[:, :5], dtype=ret_dtype)
datamod_rng = datamod[rng_sample]
datamod_rng6 = datamod_rng[:, :, 6]
datamod_rng_swap = datamod_rng[:, :, [4, 3, 0, 1, 5]]
datamod_rng_swap_trans = datamod_rng_swap.transpose((0, 2, 1))
# These two lines are one of the bottlenecks
datamod_rng_5x5 = np.matmul(datamod_rng_swap_trans, datamod_rng_swap)
datamod_rng_p5smp = np.matmul(np.linalg.inv(datamod_rng_5x5), datamod_rng_swap_trans)
datamod_rng_p = np.matmul(datamod_rng_p5smp, datamod_rng6[:, :, np.newaxis]).reshape((-1, 5))
# Assemble the y vectors for every candidate parameter set at once.
ellipse_y_arr = np.asarray(
[
datamod_rng_p[:, 2],
datamod_rng_p[:, 3],
np.ones(len(datamod_rng_p)),
datamod_rng_p[:, 1],
datamod_rng_p[:, 0],
],
dtype=ret_dtype,
)
ellipse_data_arr = ellipse_model(datamod_slim, ellipse_y_arr, np.asarray(datamod_rng_p[:, 4])).transpose((1, 0))
ellipse_data_abs = np.abs(ellipse_data_arr)
ellipse_data_index = np.argmax(np.sum(ellipse_data_abs < offset, axis=1), axis=0)
effective_data_arr = ellipse_data_arr[ellipse_data_index]
effective_sample_p_arr = datamod_rng_p[ellipse_data_index]
return fit_rotated_ellipse(effective_data_arr, effective_sample_p_arr)
# @profile
def fit_rotated_ellipse(data, P):
a = 1.0
b = P[0]
c = P[1]
d = P[2]
e = P[3]
f = P[4]
# The cost of trigonometric functions is high.
theta = 0.5 * np.arctan(b / (a - c), dtype=np.float64)
theta_sin = np.sin(theta, dtype=np.float64)
theta_cos = np.cos(theta, dtype=np.float64)
tc2 = theta_cos**2
ts2 = theta_sin**2
b_tcs = b * theta_cos * theta_sin
# Do the calculation only once
cxy = b**2 - 4 * a * c
cx = (2 * c * d - b * e) / cxy
cy = (2 * a * e - b * d) / cxy
# Intermediate terms for recovering the semi-axes.
cu = a * cx**2 + b * cx * cy + c * cy**2 - f
cu_r = np.array([(a * tc2 + b_tcs + c * ts2), (a * ts2 - b_tcs + c * tc2)])
if cu > 1: # negative/degenerate values would make the sqrt below fail
wh = np.sqrt(cu / cu_r)
else:
raise ValueError("degenerate ellipse fit") # the caller's except path treats this as a failed fit
w, h = wh[0], wh[1]
error_sum = np.sum(data)
# print("fitting error = %.3f" % (error_sum))
return (cx, cy, w, h, theta)
def get_center_noclamp(center_xy, radius):
center_x, center_y = center_xy
upper_x = center_x + radius
lower_x = center_x - radius
upper_y = center_y + radius
lower_y = center_y - radius
ransac_upper_x = center_x + max(20, radius)
ransac_lower_x = center_x - max(20, radius)
ransac_upper_y = center_y + max(20, radius)
ransac_lower_y = center_y - max(20, radius)
ransac_xy_offset = (ransac_lower_x, ransac_lower_y)
return (
center_x,
center_y,
upper_x,
lower_x,
upper_y,
lower_y,
ransac_lower_x,
ransac_lower_y,
ransac_upper_x,
ransac_upper_y,
ransac_xy_offset,
)
cct = 300
def RANSAC3D(self, hsrac_en):
f = False
ranf = False
blink = 0.8
angle = 0
if hsrac_en:
(
center_x,
center_y,
upper_x,
lower_x,
upper_y,
lower_y,
ransac_lower_x,
ransac_lower_y,
ransac_upper_x,
ransac_upper_y,
ransac_xy_offset,
) = get_center_noclamp((self.rawx, self.rawy), self.radius)
frame = safe_crop(
self.current_image_gray_clean,
int(ransac_lower_x),
int(ransac_lower_y),
int(ransac_upper_x),
int(ransac_upper_y),
1,
)
else:
frame = self.current_image_gray_clean
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
rng = np.random.default_rng()
newFrame2 = self.current_image_gray.copy()
# Convert the image to grayscale, and set up thresholding. Thresholds here are basically a
# low-pass filter that will set any pixel < the threshold value to 0. Thresholding is user
# configurable in this utility as we're dealing with variable lighting amounts/placement, as
# well as camera positioning and lensing. Therefore, everyone's cutoff may be different.
#
# The goal of thresholding settings is to make sure we can ONLY see the pupil. This is why we
# crop the image earlier; it gives us less possible dark area to get confused about in the
# next step.
# Crop first to reduce the amount of data to process.
# frame = self.current_image_gray
# For measuring processing time of image processing
# Crop first to reduce the amount of data to process.
# frame = frame[0:len(frame) - 5, :]
# To reduce the processing data, blur.
if frame is None:
print("[WARN] Frame is empty")
self.failed = self.failed + 1 # we have failed, move on to the next algo
return 0, 0, 0, frame, blink, 0, 0
else:
frame_gray = cv2.GaussianBlur(frame, (9, 9), 10)
# this will need to be adjusted every time hardware changes (IR brightness, camera position, etc.)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(frame_gray)
maxloc0_hf, maxloc1_hf = int(0.5 * max_loc[0]), int(0.5 * max_loc[1])
# crop a 15% square around max_loc
# frame_gray = frame_gray[max_loc[1] - maxloc1_hf:max_loc[1] + maxloc1_hf,
# max_loc[0] - maxloc0_hf:max_loc[0] + maxloc0_hf]
if self.settings.gui_legacy_ransac:
if self.eye_id in [EyeId.LEFT]:
threshold_value = self.settings.gui_legacy_ransac_thresh_left
else:
threshold_value = self.settings.gui_legacy_ransac_thresh_right
else:
threshold_value = min_val + self.settings.gui_thresh_add
_, thresh = cv2.threshold(frame_gray, threshold_value, 255, cv2.THRESH_BINARY)
try:
opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)
th_frame = 255 - closing
except Exception:
# fall back to the unprocessed blurred frame; ideally this try would go away, since raising is slow
th_frame = 255 - frame_gray
contours, _ = cv2.findContours(th_frame, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
hull = []
# print(contours)
# This way is faster than contours[i]
# But maybe this one is faster. hull = [cv2.convexHull(cnt, False) for cnt in contours]
for cnt in contours:
hull.append(cv2.convexHull(cnt, False))
if not hull:
# no convex hulls found; the fit below will fail and the except path flags ranf
pass
try:
cnt = sorted(hull, key=cv2.contourArea)
maxcnt = cnt[-1]
# ellipse = cv2.fitEllipse(maxcnt)
ransac_data = fit_rotated_ellipse_ransac(maxcnt.reshape(-1, 2), rng)
if ransac_data is None:
# too few contour points (maxcnt.shape[0] < sample_num); the unpack below
# raises and the except path flags ranf
pass
cx, cy, w, h, theta = ransac_data
# print(cx, cy)
# cxi, cyi, wi, hi = int(cx), int(cy), int(w), int(h)
# cv2.drawContours(self.current_image_gray, contours, -1, (255, 0, 0), 1)
# cv2.circle(self.current_image_gray, (cx, cy), 2, (0, 0, 255), -1)
# cx1, cy1, w1, h1, theta1 = fit_rotated_ellipse(maxcnt.reshape(-1, 2))
# cv2.ellipse(self.current_image_gray, (cx, cy), (w, h), theta * 180.0 / np.pi, 0.0, 360.0, (50, 250, 200), 1, )
# img = newImage2[y1:y2, x1:x2]
except Exception:
ranf = True
pass
self.current_image_gray = frame
cv2.circle(self.current_image_gray, min_loc, 2, (0, 0, 255), -1) # the point of the darkest area in the image
# However eyes are annoyingly three dimensional, so we need to take this ellipse and turn it
# into a curve patch on the surface of a sphere (the eye itself). If it's not a sphere, see your
# ophthalmologist about possible issues with astigmatism.
try:
# Get axis and angle of the ellipse, using pupil labs 2d algos. The next bit of code ranges
# from somewhat to completely magic, as most of it happens in native libraries (hence passing
# via dicts).
result_2d = {}
result_2d_final = {}
result_2d["center"] = (cx, cy)
result_2d["axes"] = (w, h)
result_2d["angle"] = theta * 180.0 / np.pi
angle = result_2d["angle"]
result_2d_final["ellipse"] = result_2d
result_2d_final["diameter"] = w
result_2d_final["location"] = (cx, cy)
result_2d_final["confidence"] = 0.99
result_2d_final["timestamp"] = self.current_frame_number / self.current_fps
# Black magic happens here, but after this we have our reprojected pupil/eye, and all we had
# to do was sell our soul to satan and/or C++.
result_3d = self.detector_3d.update_and_detect(result_2d_final, self.current_image_gray)
# Now we have our pupil
ellipse_3d = result_3d["ellipse"]
# And our eyeball that the pupil is on the surface of
self.lkg_projected_sphere = result_3d["projected_sphere"]
# Record our pupil center
exm = ellipse_3d["center"][0]
eym = ellipse_3d["center"][1]
# print(result_2d["angle"])
d = result_3d["diameter_3d"]
self.cc_radius = int(float(self.lkg_projected_sphere["axes"][0]))
self.xc = int(float(self.lkg_projected_sphere["center"][0]))
self.yc = int(float(self.lkg_projected_sphere["center"][1]))
except Exception:
f = True
csy = newFrame2.shape[0]
csx = newFrame2.shape[1]
if hsrac_en:
if ranf:
cx = self.rawx
cy = self.rawy
else:
# print(int(cx), int(clamp(cx + ransac_lower_x, 0, csx)), ransac_lower_x, csx, "y", int(cy), int(clamp(cy + ransac_lower_y, 0, csy)), ransac_lower_y, csy)
cx = int(clamp(cx + ransac_lower_x, 0, csx)) # dunno why this is being weird
cy = int(clamp(cy + ransac_lower_y, 0, csy))
# print(contours)
for cnt in contours:
(x, y, w, h) = cv2.boundingRect(cnt)
perscalarw = w / csx
perscalarh = h / csy
# print(abs(perscalarw-perscalarh))
# if abs(perscalarw-perscalarh) >= 0.2: # TODO setting
# blink = 0.0
if self.settings.gui_RANSACBLINK:
if self.ran_blink_check_for_file:
if self.eye_id in [EyeId.LEFT]:
file_path = "RANSAC_blink_LEFT.cfg"
if self.eye_id in [EyeId.RIGHT]:
file_path = "RANSAC_blink_RIGHT.cfg"
else:
file_path = "RANSAC_blink_RIGHT.cfg"
if os.path.exists(file_path):
with open(file_path, "r") as file:
self.blink_list = [float(line.strip()) for line in file]
else:
print(
f"\033[93m[INFO] RANSAC Blink Config '{file_path}' not found. Waiting for calibration.\033[0m"
)
self.ran_blink_check_for_file = False
if len(self.blink_list) == 10000: # self calibrate ransac blink IN TESTING
if self.eye_id in [EyeId.LEFT]:
with open("RANSAC_BLINK_LEFT.cfg", "w") as file:
for item in self.blink_list:
file.write(str(item) + "\n")
if self.eye_id in [EyeId.RIGHT]:
with open("RANSAC_BLINK_RIGHT.cfg", "w") as file:
for item in self.blink_list:
file.write(str(item) + "\n")
# print("SAVE")
# self.blink_list.pop(0)
self.blink_list.append(abs(perscalarw - perscalarh))
elif len(self.blink_list) < 10000:
self.blink_list.append(abs(perscalarw - perscalarh))
if abs(perscalarw - perscalarh) >= np.percentile(self.blink_list, 92):
blink = 0.0
try:
cv2.drawContours(self.current_image_gray, contours, -1, (255, 0, 0), 1) # TODO: fix visualizations with HSRAC
cv2.circle(self.current_image_gray, (int(cx), int(cy)), 2, (0, 0, 255), -1)
except Exception:
pass
# try: #for some reason the pye3d visualizations are wack, im going to just not visualize it for now..
# cv2.ellipse(
# self.current_image_gray,
# tuple(int(v) for v in ellipse_3d["center"]),
# tuple(int(v) for v in ellipse_3d["axes"]),
# ellipse_3d["angle"],
# 0,
# 360, # start/end angle for drawing
# (0, 255, 0), # color (BGR): red
# )
# except Exception:
# Sometimes we get bogus axes and trying to draw this throws. Ideally we should check for
# validity beforehand, but for now just pass. It usually fixes itself on the next frame.
# pass
try:
# print(self.lkg_projected_sphere["angle"], self.lkg_projected_sphere["axes"], self.lkg_projected_sphere["center"])
cv2.ellipse(
newFrame2,
tuple(int(v) for v in self.lkg_projected_sphere["center"]),
tuple(int(v) for v in self.lkg_projected_sphere["axes"]),
self.lkg_projected_sphere["angle"],
0,
360, # start/end angle for drawing
(0, 255, 0), # color (BGR): red
)
# draw line from center of eyeball to center of pupil
cv2.line(
self.current_image_gray,
tuple(int(v) for v in self.lkg_projected_sphere["center"]),
tuple(int(v) for v in ellipse_3d["center"]),
(0, 255, 0), # color (BGR): red
)
except Exception:
pass
self.current_image_gray = newFrame2
y, x = self.current_image_gray.shape
thresh = cv2.resize(thresh, (x, y))
try:
self.failed = 0 # we have succeeded, continue with this algo
return cx, cy, angle, thresh, blink, w, h
except Exception:
self.failed = self.failed + 1 # we have failed, move on to the next algo
return 0, 0, 0, thresh, blink, 0, 0
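A quick self-check sketch for the fitter above, assuming the EyeTrackApp sources are importable as modules (ransac.py pulls in eye.py and its dependencies on import): sample noisy points from a known rotated ellipse and confirm the recovered center:

import numpy as np
from ransac import fit_rotated_ellipse_ransac

rng = np.random.default_rng(42)
t = np.linspace(0, 2 * np.pi, 200)
cx, cy, w, h, theta = 120.0, 80.0, 40.0, 25.0, 0.3
x = cx + w * np.cos(t) * np.cos(theta) - h * np.sin(t) * np.sin(theta)
y = cy + w * np.cos(t) * np.sin(theta) + h * np.sin(t) * np.cos(theta)
pts = np.column_stack([x, y]) + rng.normal(0, 0.5, (200, 2))  # light pixel noise
result = fit_rotated_ellipse_ransac(pts.astype(np.float64), rng)
if result is not None:
    print(f"center error: ({abs(result[0] - cx):.2f}, {abs(result[1] - cy):.2f})")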

View File

@ -0,0 +1,94 @@
from datetime import datetime, timedelta
from typing import Iterable
import PySimpleGUI as sg
from colorama import Fore
from threading import Event
from eye import EyeId
from config import EyeTrackConfig, EyeTrackSettingsConfig
class BaseSettingsWidget:
def __init__(self, widget_id: EyeId, main_config: EyeTrackConfig, settings_modules: Iterable):
self.widget_id = widget_id
self.main_config = main_config
self.config = main_config.settings
self.last_error_printout = datetime.now() - timedelta(seconds=20)
self.error_printout_timeout = 2
self.reset_button_key = f"RESET_SETTINGS{widget_id}"
self.is_saving = False
self.initialized_modules = self._initialize_modules(settings_modules=settings_modules, widget_id=widget_id)
self.cancellation_event = Event()
self.cancellation_event.set()
def started(self):
return not self.cancellation_event.is_set()
def start(self):
if not self.cancellation_event.is_set():
return
self.cancellation_event.clear()
def stop(self):
if self.cancellation_event.is_set():
return
self.cancellation_event.set()
def _update_and_save_config(self, validated_data: dict):
self.main_config.update(validated_data, save=True)
self.is_saving = False
def _handle_errors(self, errors):
now = datetime.now()
elapsed_seconds = (now - self.last_error_printout).seconds
if elapsed_seconds > self.error_printout_timeout:
self.last_error_printout = now
messages = [f"{Fore.RED}[ERROR]{Fore.RESET} {error['msg']} \n" for module_errors in errors for error in module_errors]
print("".join(messages))
def render(self, window, event, values):
validated_data, errors = {}, []
for module in self.initialized_modules:
module_validated_data = module.validate(values)
if module_validated_data.changes:
validated_data.update(module_validated_data.changes)
if module_validated_data.errors:
errors.append(module_validated_data.errors)
if not errors and validated_data and not self.is_saving:
self.is_saving = True
self._update_and_save_config(validated_data)
if errors:
self._handle_errors(errors)
self.handle_events(event, window)
def _initialize_modules(self, settings_modules, widget_id):
return [module(config=self.config, settings=self.main_config, widget_id=widget_id) for module in settings_modules]
def get_layout(self) -> Iterable:
general_settings_layout = []
for module in self.initialized_modules:
general_settings_layout.extend(module.get_layout())
widget_layout = [
[sg.Column(general_settings_layout, key=f"-GENERALSETTINGSLAYOUT{self.widget_id}-", background_color="#424042")],
[sg.Text("", background_color="#424042")],
[sg.Button("Reset settings to default", key=self.reset_button_key, button_color="#c40e23")]
]
return widget_layout
def handle_events(self, event, window):
if event == "__TIMEOUT__":
return
if event == self.reset_button_key:
self.reset_config(window)
def reset_config(self, window):
default_values = {}
base_settings = EyeTrackSettingsConfig()
print(f"\033[92m[INFO] Resetting config to defaults\033[0m")
for module in self.initialized_modules:
for key in module.get_key_for_panel_defaults():
default_val = getattr(base_settings, key)
widget_key = getattr(module, key)
default_values[key] = default_val
window[widget_key].update(default_val)
print(f"\033[92m[INFO] Config reset, saving\033[0m")
self._update_and_save_config(default_values)
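A hypothetical event-loop wiring for one of these widgets (the concrete widget classes follow below; the module path for the import is an assumption):

import PySimpleGUI as sg
from config import EyeTrackConfig
from eye import EyeId
from settings.AlgoSettingsWidget import AlgoSettingsWidget  # assumed module path

config = EyeTrackConfig()  # assumption: defaults; the real app loads a saved config
widget = AlgoSettingsWidget(EyeId.SETTINGS, config)
window = sg.Window("Settings", widget.get_layout())
widget.start()
while widget.started():
    event, values = window.read(timeout=100)
    if event in (sg.WIN_CLOSED, None):
        widget.stop()
        break
    widget.render(window, event, values)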

View File

@ -0,0 +1,34 @@
from queue import Queue
from config import EyeTrackConfig
from eye import EyeId
from osc.OSCMessage import OSCMessage, OSCMessageType
from settings.BaseSettings import BaseSettingsWidget
from settings.modules.VRCFTSettingsModule import VRCFTSettingsModule
class VRCFTSettingsWidget(BaseSettingsWidget):
def __init__(self, widget_id: EyeId, main_config: EyeTrackConfig, osc_queue_in: Queue[OSCMessage]):
self.osc_queue = osc_queue_in
settings_modules = [
VRCFTSettingsModule,
]
super().__init__(widget_id, main_config, settings_modules)
def _update_and_save_config(self, validated_data: dict):
self.main_config.update(validated_data, save=True)
for field, value in validated_data.items():
self.osc_queue.put(
OSCMessage(
type=OSCMessageType.VRCFT_MODULE_INFO,
data={
"command": "set",
"field": field,
"value": value,
},
)
)
self.is_saving = False

View File

@ -0,0 +1,45 @@
"""
------------------------------------------------------------------------------------------------------
,@@@@@@
@@@@@@@@@@@ @@@
@@@@@@@@@@@@ @@@@@@@@@@@
@@@@@@@@@@@@@ @@@@@@@@@@@@@@
@@@@@@@/ ,@@@@@@@@@@@@@
/@@@@@@@@@@@@@@@ @@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@ @@@@@
@@@@@@@@ @@@@@
,@@@ @@@@&
@@@@@@. @@@@
@@@ @@@@@@@@@/ @@@@@
,@@@. @@@@@@((@ @@@@(
//@@@ ,, @@@@ @@@@@
@@@( @@@@@@@
@@@ @ @@@@@@@@#
@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@(
Copyright (c) 2025 EyeTrackVR <3
LICENSE: Babble Software Distribution License 1.0
------------------------------------------------------------------------------------------------------
"""
from config import EyeTrackConfig
from eye import EyeId
from settings.BaseSettings import BaseSettingsWidget
from settings.modules.AdvancedTrackingAlgoSettingsModule import (
AdvancedTrackingAlgoSettingsModule,
)
from settings.modules.BlinkAlgoModule import BlinkAlgoSettingsModule
from settings.modules.TrackingAlgorithmModule import TrackingAlgorithmModule
class AlgoSettingsWidget(BaseSettingsWidget):
def __init__(self, widget_id: EyeId, main_config: EyeTrackConfig):
settings_modules = [
TrackingAlgorithmModule,
BlinkAlgoSettingsModule,
AdvancedTrackingAlgoSettingsModule,
]
super().__init__(widget_id, main_config, settings_modules)

View File

@ -0,0 +1 @@
BACKGROUND_COLOR = "#424042"

View File

@ -0,0 +1,43 @@
"""
------------------------------------------------------------------------------------------------------
,@@@@@@
@@@@@@@@@@@ @@@
@@@@@@@@@@@@ @@@@@@@@@@@
@@@@@@@@@@@@@ @@@@@@@@@@@@@@
@@@@@@@/ ,@@@@@@@@@@@@@
/@@@@@@@@@@@@@@@ @@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@ @@@@@
@@@@@@@@ @@@@@
,@@@ @@@@&
@@@@@@. @@@@
@@@ @@@@@@@@@/ @@@@@
,@@@. @@@@@@((@ @@@@(
//@@@ ,, @@@@ @@@@@
@@@( @@@@@@@
@@@ @ @@@@@@@@#
@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@(
Copyright (c) 2025 EyeTrackVR <3
LICENSE: Babble Software Distribution License 1.0
------------------------------------------------------------------------------------------------------
"""
from config import EyeTrackConfig
from eye import EyeId
from settings.BaseSettings import BaseSettingsWidget
from settings.modules.GeneralSettingsModule import GeneralSettingsModule
from settings.modules.OneEuroSettingsModule import OneEuroSettingsModule
from settings.modules.OSCSettingsModule import OSCSettingsModule
class SettingsWidget(BaseSettingsWidget):
def __init__(self, widget_id: EyeId, main_config: EyeTrackConfig):
settings_modules = [
GeneralSettingsModule,
OneEuroSettingsModule,
OSCSettingsModule,
]
super().__init__(widget_id, main_config, settings_modules)

View File

@ -0,0 +1,137 @@
from settings.modules.BaseModule import BaseSettingsModule, BaseValidationModel
import PySimpleGUI as sg
class AdvancedTrackingAlgoSettingsValidationModel(BaseValidationModel):
gui_HSF_radius_left: int
gui_HSF_radius_right: int
gui_blob_maxsize: int
gui_blob_minsize: int
gui_legacy_ransac_thresh_left: int
gui_legacy_ransac_thresh_right: int
gui_skip_autoradius: bool
gui_thresh_add: int
gui_threshold: int
gui_pupil_dilation: bool
class AdvancedTrackingAlgoSettingsModule(BaseSettingsModule):
def __init__(self, config, widget_id, **kwargs):
super().__init__(config=config, widget_id=widget_id, **kwargs)
self.validation_model = AdvancedTrackingAlgoSettingsValidationModel
self.gui_blob_maxsize = f"-BLOBMAXSIZE{widget_id}-"
self.gui_blob_minsize = f"-BLOBMINSIZE{widget_id}-"
self.gui_skip_autoradius = f"-SKIPAUTORADIUS{widget_id}-"
self.gui_thresh_add = f"-THRESHADD{widget_id}-"
self.gui_threshold = f"-BLOBTHRESHOLD{widget_id}-"
self.gui_HSF_radius_left = f"-HSFRADIUSLEFT{widget_id}-"
self.gui_HSF_radius_right = f"-HSFRADIUSRIGHT{widget_id}-"
self.gui_legacy_ransac_thresh_right = f"-THRESHRIGHT{widget_id}-"
self.gui_legacy_ransac_thresh_left = f"-THRESHLEFT{widget_id}-"
self.gui_pupil_dilation = f"-EBPD{widget_id}-"
def get_layout(self):
return [
[sg.Text("Pupil Dilation Algo Settings:", background_color="#242224")],
[
sg.Checkbox(
"Ellipse Based Pupil Dilation",
default=self.config.gui_pupil_dilation,
key=self.gui_pupil_dilation,
background_color="#424042",
)
],
[sg.Text("Advanced Tracking Algorithm Settings:", background_color="#242224")],
[
sg.Checkbox(
"HSF: Skip Auto Radius",
default=self.config.gui_skip_autoradius,
key=self.gui_skip_autoradius,
background_color="#424042",
tooltip="To gain more control and possibly better tracking quality of HSF, please disable auto radius to enable manual adjustment.",
),
],
[
sg.Text("Left HSF Radius:", background_color="#424042"),
sg.Slider(
range=(1, 50),
default_value=self.config.gui_HSF_radius_left,
orientation="h",
key=self.gui_HSF_radius_left,
background_color="#424042",
tooltip="Adjusts the radius parameter for HSF. Only adjust if you are having tracking issues.",
),
sg.Text("Right HSF Radius:", background_color="#424042"),
sg.Slider(
range=(1, 50),
default_value=self.config.gui_HSF_radius_right,
orientation="h",
key=self.gui_HSF_radius_right,
background_color="#424042",
tooltip="Adjusts the radius parameter for HSF. Only adjust if you are having tracking issues.",
),
],
[
sg.Text("RANSAC Thresh Add", background_color="#424042"),
sg.Slider(
range=(1, 50),
default_value=self.config.gui_thresh_add,
orientation="h",
key=self.gui_thresh_add,
background_color="#424042",
tooltip="Adjusts the amount of threshold to add to RANSAC. Useful for fine tuning your setup.",
),
sg.Text("Blob Threshold", background_color="#424042"),
# TODO make this for right and left eyes? I dont know how vital that is..
sg.Slider(
range=(0, 110),
default_value=self.config.gui_threshold,
orientation="h",
key=self.gui_threshold,
background_color="#424042",
tooltip="Adjusts the threshold for blob tracking.",
),
],
[
sg.Text("Min Blob Size:", background_color="#424042"),
sg.Slider(
range=(1, 50),
default_value=self.config.gui_blob_minsize,
orientation="h",
key=self.gui_blob_minsize,
background_color="#424042",
tooltip="Minimum size a blob has to be for blob tracking.",
),
sg.Text("Max Blob Size:", background_color="#424042"),
sg.Slider(
range=(1, 50),
default_value=self.config.gui_blob_maxsize,
orientation="h",
key=self.gui_blob_maxsize,
background_color="#424042",
tooltip="Maximum size a blob can be for blob tracking.",
),
],
[
sg.Text("Right Eye Thresh:", background_color="#424042"),
sg.Slider(
range=(1, 120),
default_value=self.config.gui_legacy_ransac_thresh_right,
orientation="h",
key=self.gui_legacy_ransac_thresh_right,
background_color="#424042",
tooltip="Threshold for right eye, legacy RANSAC only",
),
sg.Text("Left Eye Thresh:", background_color="#424042"),
sg.Slider(
range=(1, 120),
default_value=self.config.gui_legacy_ransac_thresh_left,
orientation="h",
key=self.gui_legacy_ransac_thresh_left,
background_color="#424042",
tooltip="Threshold for left eye, legacy RANSAC only",
),
],
]

View File

@ -0,0 +1,68 @@
from typing import Iterable, Optional, NamedTuple, Any
import pydantic
from pydantic import BaseModel
from config import EyeTrackSettingsConfig
class MissingValidationModelException(Exception):
pass
class MissingValidationConfigException(Exception):
pass
class BaseValidationModel(BaseModel):
pass
class ValidationResult(NamedTuple):
changes: Optional[dict[str, str]]
errors: list[Any] # ErrorDict but we cannot import it, it's not exposed
class BaseSettingsModule:
def __init__(self, config: EyeTrackSettingsConfig, widget_id, **kwargs):
self.validation_model: BaseValidationModel = BaseValidationModel # noqa
self.config = config
self.widget_id = widget_id
def get_validation_model(self):
"""Return validation model, can be overridden for custom behaviour"""
return self.validation_model
def initialize_validation_model(self, values):
validation_model = self.get_validation_model()
if not validation_model:
raise MissingValidationModelException()
field_mapping = {}
for field in self.validation_model.model_fields.keys():
field_mapping[field] = values[getattr(self, field)]
return validation_model(**field_mapping)
def validate(self, values, raise_exception=False) -> Optional[ValidationResult]:
"""Return """
if not self.config:
raise MissingValidationConfigException()
try:
changes = {}
validated_model = self.initialize_validation_model(values)
for field, value in validated_model.model_dump().items():
if getattr(self.config, field) != value:
changes[field] = value
return ValidationResult(changes, [])
except pydantic.ValidationError as e:
if not raise_exception:
return ValidationResult(None, e.errors())
raise
def get_layout(self) -> Iterable:
raise NotImplementedError
def get_key_for_panel_defaults(self) -> Iterable:
return self.validation_model.model_json_schema().get("properties").keys()
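For orientation, here is a minimal sketch of how a settings module drives this `validate()` flow end to end. Every name below (`DemoValidationModel`, `DemoModule`, the `-OSCPORT0-` widget key, the stand-in config) is hypothetical and only illustrates the mechanism:

```python
# Hypothetical sketch of the BaseSettingsModule.validate() flow; all names
# here are illustrative stand-ins, not part of this commit.
from types import SimpleNamespace
from pydantic import BaseModel

class DemoValidationModel(BaseModel):
    gui_osc_port: int

class DemoModule:
    def __init__(self, config):
        self.validation_model = DemoValidationModel
        self.config = config
        self.gui_osc_port = "-OSCPORT0-"  # widget key, as a real module builds in __init__

module = DemoModule(SimpleNamespace(gui_osc_port=9000))
values = {"-OSCPORT0-": "9001"}  # raw strings, as PySimpleGUI returns them

# initialize_validation_model: map model fields to the matching widget values
field_mapping = {f: values[getattr(module, f)] for f in DemoValidationModel.model_fields}
validated = DemoValidationModel(**field_mapping)  # coerces "9001" -> 9001

# validate: diff the validated values against the stored config
changes = {f: v for f, v in validated.model_dump().items()
           if getattr(module.config, f) != v}
print(changes)  # {'gui_osc_port': 9001}
```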

View File

@ -0,0 +1,110 @@
from pydantic import AfterValidator
from typing_extensions import Annotated
from settings.modules.BaseModule import BaseSettingsModule, BaseValidationModel
import PySimpleGUI as sg
from settings.modules.CommonFieldValidators import check_is_float_convertible
class BlinkAlgoSettingsValidationModel(BaseValidationModel):
gui_IBO: bool
gui_RANSACBLINK: bool
gui_BLINK: bool
gui_LEAP_lid: bool
ibo_filter_samples: int
calibration_samples: int
ibo_fully_close_eye_threshold: Annotated[str, AfterValidator(check_is_float_convertible)]
gui_circular_crop_left: bool
gui_circular_crop_right: bool
leap_calibration_samples: int
class BlinkAlgoSettingsModule(BaseSettingsModule):
def __init__(self, config, widget_id, **kwargs):
super().__init__(config=config, widget_id=widget_id, **kwargs)
self.validation_model = BlinkAlgoSettingsValidationModel
self.gui_IBO = f"-IBO{widget_id}-"
self.gui_RANSACBLINK = f"-RANSACBLINK{widget_id}-"
self.gui_BLINK = f"-BLINK{widget_id}-"
self.gui_LEAP_lid = f"-LEAPLID{widget_id}-"
self.ibo_filter_samples = f"-IBOFILTERSAMPLE{widget_id}-"
self.calibration_samples = f"-CALIBRATIONSAMPLES{widget_id}-"
self.ibo_fully_close_eye_threshold = f"-CLOSETHRESH{widget_id}-"
self.gui_circular_crop_left = f"-CIRCLECROPLEFT{widget_id}-"
self.gui_circular_crop_right = f"-CIRCLECROPRIGHT{widget_id}-"
self.leap_calibration_samples = f"-LEAPCALIBRATION{widget_id}-"
def get_layout(self):
return [
[sg.Text("Blink Algo Settings:", background_color="#242224")],
[
sg.Checkbox(
"Intensity Based Openness",
default=self.config.gui_IBO,
key=self.gui_IBO,
background_color="#424042",
),
sg.Checkbox(
"RANSAC Quick Blink Algo",
default=self.config.gui_RANSACBLINK,
key=self.gui_RANSACBLINK,
background_color="#424042",
),
sg.Checkbox(
"Binary Blink Algo",
default=self.config.gui_BLINK,
key=self.gui_BLINK,
background_color="#424042",
),
sg.Checkbox(
"LEAP Lid",
default=self.config.gui_LEAP_lid,
key=self.gui_LEAP_lid,
background_color="#424042",
),
],
[
sg.Text("LEAP Calibration Samples", background_color="#424042"),
sg.InputText(
self.config.leap_calibration_samples,
key=self.leap_calibration_samples,
size=(0, 10),
),
],
[
sg.Text("IBO Filter Sample Size", background_color="#424042"),
sg.InputText(
self.config.ibo_filter_samples,
key=self.ibo_filter_samples,
size=(0, 10),
),
sg.Text("Calibration Samples", background_color="#424042"),
sg.InputText(
self.config.calibration_samples,
key=self.calibration_samples,
size=(0, 10),
),
sg.Text("IBO Close Threshold", background_color="#424042"),
sg.InputText(
self.config.ibo_fully_close_eye_threshold,
key=self.ibo_fully_close_eye_threshold,
size=(0, 10),
),
],
[
sg.Checkbox(
"Left Eye Circle crop",
default=self.config.gui_circular_crop_left,
key=self.gui_circular_crop_left,
background_color="#424042",
),
sg.Checkbox(
"Right Eye Circle crop",
default=self.config.gui_circular_crop_right,
key=self.gui_circular_crop_right,
background_color="#424042",
),
],
]

View File

@ -0,0 +1,34 @@
import ipaddress
def check_is_float_convertible(v: str):
"""
Check if value provided can be converted to a float or double.
PySimpleGUI does not support floats or doubles in UI, so we have to make sure
that what the user typed in is correct
"""
try:
float(v)
return v
except ValueError:
raise ValueError("Please provide a proper number")
def try_convert_to_float(v: str):
"""
Check if value provided can be converted to a float and return converted result
Basically what `check_is_float_convertible` does but returns a float
"""
try:
return float(check_is_float_convertible(v))
except ValueError:
raise
def check_is_ip_address(v: str):
try:
ipaddress.IPv4Address(v)
return v
except ValueError:
raise ValueError("Please provide a valid IP Address")

View File

@ -0,0 +1,106 @@
from config import EyeTrackSettingsConfig
from settings.modules.BaseModule import BaseSettingsModule, BaseValidationModel
import PySimpleGUI as sg
class GeneralSettingsValidationModel(BaseValidationModel):
gui_flip_x_axis_left: bool
gui_flip_x_axis_right: bool
gui_flip_y_axis: bool
gui_outer_side_falloff: bool
gui_update_check: bool
gui_right_eye_dominant: bool
gui_left_eye_dominant: bool
gui_eye_dominant_diff_thresh: float
class GeneralSettingsModule(BaseSettingsModule):
def __init__(self, config, widget_id, **kwargs):
super().__init__(config=config, widget_id=widget_id, **kwargs)
self.validation_model = GeneralSettingsValidationModel
self.gui_flip_x_axis_left = f"-FLIPXAXISLEFT{widget_id}-"
self.gui_flip_x_axis_right = f"-FLIPXAXISRIGHT{widget_id}-"
self.gui_flip_y_axis = f"-FLIPYAXIS{widget_id}-"
self.gui_outer_side_falloff = f"-EYEFALLOFF{widget_id}-"
self.gui_eye_dominant_diff_thresh = f"-DIFFTHRESH{widget_id}-"
self.gui_left_eye_dominant = f"-LEFTEYEDOMINANT{widget_id}-"
self.gui_right_eye_dominant = f"-RIGHTEYEDOMINANT{widget_id}-"
self.gui_update_check = f"-UPDATECHECK{widget_id}-"
# gui_right_eye_dominant: bool = False
# gui_left_eye_dominant: bool = False
# gui_outer_side_falloff: bool = True
# gui_eye_dominant_diff_thresh: float = 0.3
def get_layout(self):
return [
[
sg.Text("General Settings:", background_color="#242224"),
],
[
sg.Checkbox(
"Flip Left Eye X Axis",
default=self.config.gui_flip_x_axis_left,
key=self.gui_flip_x_axis_left,
background_color="#424042",
tooltip="Flips the left eye's X axis.",
),
sg.Checkbox(
"Flip Right Eye X Axis",
default=self.config.gui_flip_x_axis_right,
key=self.gui_flip_x_axis_right,
background_color="#424042",
tooltip="Flips the right eye's X axis.",
),
sg.Checkbox(
"Flip Y Axis",
default=self.config.gui_flip_y_axis,
key=self.gui_flip_y_axis,
background_color="#424042",
tooltip="Flips the eye's Y axis.",
),
],
[
sg.Checkbox(
"Check For Updates",
default=self.config.gui_update_check,
key=self.gui_update_check,
background_color="#424042",
tooltip="Toggle update check on launch.",
),
],
[
sg.Text("Eye Falloff Settings:", background_color="#242224"),
],
[
sg.Checkbox(
"Outer Eye Falloff",
default=self.config.gui_outer_side_falloff,
key=self.gui_outer_side_falloff,
background_color="#424042",
tooltip="If one eye's tracking is past a threshold of difference, we assume the eye looking most outward with lowest average velocity in the past x seconds is correct.",
),
sg.Text("Eye Difference Threshold", background_color="#424042"),
sg.InputText(
self.config.gui_eye_dominant_diff_thresh,
key=self.gui_eye_dominant_diff_thresh,
size=(0, 10),
),
],
[
sg.Checkbox(
"Force Left Eye Dominant",
default=self.config.gui_left_eye_dominant,
key=self.gui_left_eye_dominant,
background_color="#424042",
tooltip="If one eye is too different than the other, use left eye data",
),
sg.Checkbox(
"Force Right Eye Dominant",
default=self.config.gui_right_eye_dominant,
key=self.gui_right_eye_dominant,
background_color="#424042",
tooltip="If one eye is too different than the other, use right eye data",
),
],
]

View File

@ -0,0 +1,137 @@
from pydantic import model_validator
from settings.modules.BaseModule import BaseSettingsModule, BaseValidationModel
from settings.constants import BACKGROUND_COLOR
import PySimpleGUI as sg
class OSCValidationModel(BaseValidationModel):
gui_osc_port: int
gui_osc_address: str
gui_ROSC: bool
gui_osc_receiver_port: int
gui_osc_recenter_address: str
gui_osc_recalibrate_address: str
gui_vrc_native: bool
gui_osc_vrcft_v1: bool
gui_osc_vrcft_v2: bool
gui_use_module: bool
@model_validator(mode="after")
def check_osc_vrcft_versions(self):
if self.gui_osc_vrcft_v1 and self.gui_osc_vrcft_v2:
raise ValueError("Only one version of VRCFT params can be turned on")
return self
@model_validator(mode="after")
def check_osc_output_mode(self):
if self.gui_vrc_native and any([self.gui_osc_vrcft_v1, self.gui_osc_vrcft_v2]):
raise ValueError("Either VRCNative or VRCFT output can be active at a time")
return self
class OSCSettingsModule(BaseSettingsModule):
def __init__(self, config, widget_id, **kwargs):
super().__init__(config=config, widget_id=widget_id, **kwargs)
self.validation_model = OSCValidationModel
self.gui_osc_address = f"-OSCADDRESS{widget_id}-"
self.gui_osc_port = f"-OSCPORT{widget_id}-"
self.gui_ROSC = f"-ROSC{widget_id}-"
self.gui_osc_receiver_port = f"OSCRECEIVERPORT{widget_id}-"
self.gui_osc_recenter_address = f"OSCRECENTERADDRESS{widget_id}-"
self.gui_osc_recalibrate_address = f"OSCRECALIBRATEADDRESS{widget_id}-"
self.gui_vrc_native = f"-VRCNATIVE{widget_id}-"
self.gui_osc_vrcft_v1 = f"-OSCVRCFTV1{widget_id}-"
self.gui_osc_vrcft_v2 = f"-OSCVRCFTV2{widget_id}-"
self.gui_use_module = f"-OSCUSEMODULE{widget_id}-"
def get_layout(self):
return [
[
sg.Text("OSC Settings:", background_color="#242224"),
],
[
sg.Checkbox(
"Use ETVR VRCFT Module",
default=self.config.gui_use_module,
key=self.gui_use_module,
background_color="#424042",
tooltip="Toggle output to VRCFT Module or just regular OSC port",
),
],
[
sg.Checkbox(
"VRC Native Eyetracking",
default=self.config.gui_vrc_native,
key=self.gui_vrc_native,
background_color="#424042",
tooltip="Toggle VRCFT output or VRC native",
),
sg.Checkbox(
"VRCFT v1",
default=self.config.gui_osc_vrcft_v1,
key=self.gui_osc_vrcft_v1,
background_color="#424042",
tooltip="Toggle VRCFT's v1 Eyetracking format.",
),
sg.Checkbox(
"VRCFT v2 (UE)",
default=self.config.gui_osc_vrcft_v2,
key=self.gui_osc_vrcft_v2,
background_color="#424042",
tooltip="Toggle VRCFT's v2 (UE) Eyetracking format.",
),
],
[
sg.Text("Address:", background_color=BACKGROUND_COLOR),
sg.InputText(
self.config.gui_osc_address,
key=self.gui_osc_address,
size=(0, 20),
tooltip="IP address we send OSC data to.",
),
sg.Text("Port:", background_color=BACKGROUND_COLOR),
sg.InputText(
self.config.gui_osc_port,
key=self.gui_osc_port,
size=(0, 10),
tooltip="OSC port we send data to.",
),
],
[
sg.Text("Receive functions", background_color=BACKGROUND_COLOR),
sg.Checkbox(
"",
default=self.config.gui_ROSC,
key=self.gui_ROSC,
background_color=BACKGROUND_COLOR,
size=(0, 10),
tooltip="Toggle OSC receive functions.",
),
],
[
sg.Text("Receiver Port:", background_color=BACKGROUND_COLOR),
sg.InputText(
self.config.gui_osc_receiver_port,
key=self.gui_osc_receiver_port,
size=(0, 10),
tooltip="Port we receive OSC data from (used to recalibrate or recenter app from within VRChat.",
),
sg.Text("Recenter Address:", background_color=BACKGROUND_COLOR),
sg.InputText(
self.config.gui_osc_recenter_address,
key=self.gui_osc_recenter_address,
size=(0, 10),
tooltip="OSC Address used for recentering your eye.",
),
],
[
sg.Text("Recalibrate Address:", background_color=BACKGROUND_COLOR),
sg.InputText(
self.config.gui_osc_recalibrate_address,
key=self.gui_osc_recalibrate_address,
size=(0, 10),
tooltip="OSC address we use for recalibrating your eye",
),
],
]
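As a sanity check on the two `model_validator`s above, here is a hedged sketch of the exclusivity they enforce; all field values below are dummies, and it assumes `OSCValidationModel` from this file is importable:

```python
# Illustrative only: exercising the mutual-exclusion validators above.
from pydantic import ValidationError

base = dict(
    gui_osc_port=9000,
    gui_osc_address="127.0.0.1",
    gui_ROSC=False,
    gui_osc_receiver_port=9001,
    gui_osc_recenter_address="/avatar/parameters/etvr_recenter",
    gui_osc_recalibrate_address="/avatar/parameters/etvr_recalibrate",
    gui_vrc_native=False,
    gui_use_module=False,
)

OSCValidationModel(**base, gui_osc_vrcft_v1=True, gui_osc_vrcft_v2=False)  # fine
try:
    OSCValidationModel(**base, gui_osc_vrcft_v1=True, gui_osc_vrcft_v2=True)
except ValidationError:
    print("rejected: only one VRCFT param version may be enabled")
```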

View File

@ -0,0 +1,42 @@
from pydantic import AfterValidator
from typing_extensions import Annotated
from settings.modules.BaseModule import BaseSettingsModule, BaseValidationModel
from settings.constants import BACKGROUND_COLOR
import PySimpleGUI as sg
from settings.modules.CommonFieldValidators import check_is_float_convertible
class OneEuroFilterValidationModel(BaseValidationModel):
gui_speed_coefficient: Annotated[str, AfterValidator(check_is_float_convertible)]
gui_min_cutoff: Annotated[str, AfterValidator(check_is_float_convertible)]
class OneEuroSettingsModule(BaseSettingsModule):
def __init__(self, config, widget_id, **kwargs):
super().__init__(config=config, widget_id=widget_id, **kwargs)
self.gui_speed_coefficient = f"-SPEEDCOEFFICIENT{widget_id}-"
self.gui_min_cutoff = f"-MINCUTOFF{widget_id}-"
self.validation_model = OneEuroFilterValidationModel
def get_layout(self):
return [
[
sg.Text("One Euro Filter Paramaters:", background_color='#242224'),
],
[
sg.Text("Min Frequency Cutoff", background_color=BACKGROUND_COLOR),
sg.InputText(
self.config.gui_min_cutoff,
key=self.gui_min_cutoff,
size=(0, 10),
),
sg.Text("Speed Coefficient", background_color=BACKGROUND_COLOR),
sg.InputText(
self.config.gui_speed_coefficient,
key=self.gui_speed_coefficient,
size=(0, 10),
),
],
]

View File

@ -0,0 +1,238 @@
from pydantic import model_validator
from settings.modules.BaseModule import BaseSettingsModule, BaseValidationModel
import PySimpleGUI as sg
class TrackingAlgorithmValidationModel(BaseValidationModel):
gui_BLOB: bool
gui_DADDY: bool
gui_HSF: bool
gui_HSRAC: bool
gui_AHSF: bool
gui_LEAP: bool
gui_RANSAC3D: bool
gui_AHSFRAC: bool
gui_legacy_ransac: bool
gui_BLOBP: int
gui_DADDYP: int
gui_AHSFRACP: int
gui_HSFP: int
gui_AHSFP: int
gui_HSRACP: int
gui_LEAPP: int
gui_RANSAC3DP: int
@model_validator(mode="after")
def check_algorithm_order(self):
algos_list = [
self.gui_AHSFP,
self.gui_BLOBP,
self.gui_DADDYP,
self.gui_HSFP,
self.gui_HSRACP,
self.gui_LEAPP,
self.gui_RANSAC3DP,
self.gui_AHSFRACP,
]
algos_set = set(algos_list)
if len(algos_set) != len(algos_list):
raise ValueError("Please fixup the algorithm order, some algos are doubled")
return self
class TrackingAlgorithmModule(BaseSettingsModule):
def __init__(self, config, widget_id, **kwargs):
super().__init__(config=config, widget_id=widget_id, **kwargs)
self.algo_count = ["1", "2", "3", "4", "5", "6", "7", "8"]
self.validation_model = TrackingAlgorithmValidationModel
self.gui_BLOB = f"-BLOBFALLBACK{widget_id}-"
self.gui_DADDY = f"-DADDY{widget_id}-"
self.gui_HSF = f"-HSF{widget_id}-"
self.gui_HSRAC = f"-HSRAC{widget_id}-"
self.gui_LEAP = f"-LEAP{widget_id}-"
self.gui_AHSF = f"-AHSF{widget_id}-"
self.gui_AHSFRAC = f"-gui_AHSFRAC{widget_id}-"
self.gui_RANSAC3D = f"-RANSAC3D{widget_id}-"
self.gui_legacy_ransac = f"-LEGACYRANSACTHRESH{widget_id}-"
self.gui_BLOBP = f"-BLOBP{widget_id}-"
self.gui_DADDYP = f"-DADDYP{widget_id}-"
self.gui_HSFP = f"-HSFP{widget_id}-"
self.gui_HSRACP = f"-HSRACP{widget_id}-"
self.gui_LEAPP = f"-LEAPP{widget_id}-"
self.gui_AHSFP = f"-AHSFP{widget_id}-"
self.gui_RANSAC3DP = f"-RANSAC3DP{widget_id}-"
self.gui_AHSFRACP = f"-gui_AHSFRACP{widget_id}-"
# TODO: custom validation: make a set of the priority values; if it is smaller than the list, some priorities are duplicated
def get_layout(self):
return [
[
sg.Text(
"Tracking Algorithm Order Settings:",
background_color="#242224",
)
],
[
sg.Checkbox(
"",
default=self.config.gui_AHSFRAC,
key=self.gui_AHSFRAC,
background_color="#424042",
tooltip="Flagship hybrid algo",
),
sg.Combo(
self.algo_count,
default_value=self.config.gui_AHSFRACP,
key=self.gui_AHSFRACP,
background_color="#424042",
text_color="white",
button_arrow_color="black",
button_background_color="#6f4ca1",
tooltip="Select the priority of eyetracking algorithms.",
),
sg.Text("ASHSFRAC", background_color="#424042"),
sg.Checkbox(
"",
default=self.config.gui_AHSF,
key=self.gui_AHSF,
background_color="#424042",
tooltip="Newer version of HSF",
),
sg.Combo(
self.algo_count,
default_value=self.config.gui_AHSFP,
key=self.gui_AHSFP,
background_color="#424042",
text_color="white",
button_arrow_color="black",
button_background_color="#6f4ca1",
tooltip="Select the priority of eyetracking algorithms.",
),
sg.Text("ASHSF", background_color="#424042"),
],
[
sg.Checkbox(
"",
default=self.config.gui_HSRAC,
key=self.gui_HSRAC,
background_color="#424042",
tooltip="Our flagship algorithm, utilizing both HSF and RANSAC for best tracking quality and lighting resistance.",
),
sg.Combo(
self.algo_count,
default_value=self.config.gui_HSRACP,
key=self.gui_HSRACP,
background_color="#424042",
text_color="white",
button_arrow_color="black",
button_background_color="#6f4ca1",
tooltip="Select the priority of eyetracking algorithms.",
),
sg.Text("HSRAC", background_color="#424042"),
sg.Checkbox(
"",
default=self.config.gui_HSF,
key=self.gui_HSF,
background_color="#424042",
tooltip="HSF Is a new, lower resolution tracking algorithim that provides excelent resilancy to lighting conditions and great speed.",
),
sg.Combo(
self.algo_count,
default_value=self.config.gui_HSFP,
key=self.gui_HSFP,
background_color="#424042",
text_color="white",
button_arrow_color="black",
button_background_color="#6f4ca1",
tooltip="Select the priority of eyetracking algorithims.",
),
sg.Text("Haar Surround Feature", background_color="#424042"),
],
[
sg.Checkbox(
"",
default=self.config.gui_DADDY,
key=self.gui_DADDY,
background_color="#424042",
tooltip="DADDY Uses a Deep learning algorithm. This has a big CPU usage impact.",
),
sg.Combo(
self.algo_count,
default_value=self.config.gui_DADDYP,
key=self.gui_DADDYP,
background_color="#424042",
text_color="white",
button_arrow_color="black",
button_background_color="#6f4ca1",
tooltip="Select the priority of eyetracking algorithms.",
),
sg.Text("DADDY", background_color="#424042"),
# ],
# [
sg.Checkbox(
"",
default=self.config.gui_RANSAC3D,
key=self.gui_RANSAC3D,
background_color="#424042",
tooltip="RANSAC3D provides good tracking quality, however does not do well in bad lighting conditions.",
),
sg.Combo(
self.algo_count,
default_value=self.config.gui_RANSAC3DP,
key=self.gui_RANSAC3DP,
background_color="#424042",
text_color="white",
button_arrow_color="black",
button_background_color="#6f4ca1",
tooltip="Select the priority of eyetracking algorithms.",
),
sg.Text("RANSAC 3D", background_color="#424042"),
sg.Checkbox(
"Legacy RANSAC Thresh",
default=self.config.gui_legacy_ransac,
key=self.gui_legacy_ransac,
background_color="#424042",
),
],
[
sg.Checkbox(
"",
default=self.config.gui_LEAP,
key=self.gui_LEAP,
background_color="#424042",
tooltip="LEAP Uses a lightweight deep learning algorithm.",
),
sg.Combo(
self.algo_count,
default_value=self.config.gui_LEAPP,
key=self.gui_LEAPP,
background_color="#424042",
text_color="white",
button_arrow_color="black",
button_background_color="#6f4ca1",
tooltip="Select the priority of eyetracking algorithms.",
),
sg.Text("LEAP", background_color="#424042"),
sg.Checkbox(
"",
default=self.config.gui_BLOB,
key=self.gui_BLOB,
background_color="#424042",
tooltip="Blob tracking is the oldest and worst tracking algorithm, it provides fast, though sometimes inaccurate tracking.",
),
sg.Combo(
self.algo_count,
default_value=self.config.gui_BLOBP,
key=self.gui_BLOBP,
background_color="#424042",
text_color="white",
button_arrow_color="black",
button_background_color="#6f4ca1",
tooltip="Select the priority of eyetracking algorithms.",
),
sg.Text("Blob", background_color="#424042"),
],
]
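A quick, hedged sketch of the duplicate-priority check in `check_algorithm_order`; the flag and priority values below are arbitrary, and it assumes `TrackingAlgorithmValidationModel` from this file is importable:

```python
# Illustrative only: the duplicate-priority validator in action.
from pydantic import ValidationError

flags = dict(gui_BLOB=False, gui_DADDY=False, gui_HSF=False, gui_HSRAC=True,
             gui_AHSF=False, gui_LEAP=False, gui_RANSAC3D=False,
             gui_AHSFRAC=True, gui_legacy_ransac=False)

ok = dict(gui_AHSFRACP=1, gui_AHSFP=2, gui_HSRACP=3, gui_HSFP=4,
          gui_DADDYP=5, gui_RANSAC3DP=6, gui_BLOBP=7, gui_LEAPP=8)
TrackingAlgorithmValidationModel(**flags, **ok)  # distinct priorities: passes

dup = dict(ok, gui_LEAPP=7)  # LEAP and BLOB both at priority 7
try:
    TrackingAlgorithmValidationModel(**flags, **dup)
except ValidationError:
    print("rejected: two algorithms share one priority slot")
```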

View File

@ -0,0 +1,225 @@
from typing import Iterable
import PySimpleGUI as sg
from pydantic import AfterValidator
from typing_extensions import Annotated
from settings.modules.BaseModule import BaseSettingsModule, BaseValidationModel
from settings.modules.CommonFieldValidators import check_is_ip_address, try_convert_to_float
class VRCFTSettingsModuleValidationModel(BaseValidationModel):
gui_VRCFTModulePort: int
gui_VRCFTModuleIPAddress: Annotated[str, AfterValidator(check_is_ip_address)]
gui_ShouldEmulateEyeWiden: bool
gui_ShouldEmulateEyeSquint: bool
gui_ShouldEmulateEyebrows: bool
gui_WidenThresholdV1_min: float
gui_WidenThresholdV1_max: float
gui_WidenThresholdV2_min: float
gui_WidenThresholdV2_max: float
gui_SqueezeThresholdV1_min: float
gui_SqueezeThresholdV1_max: float
gui_SqueezeThresholdV2_min: float
gui_SqueezeThresholdV2_max: float
gui_EyebrowThresholdRising: float
gui_EyebrowThresholdLowering: float
# this is a hack. I don't like it, but that's what I gotta do to make both Pydantic and PySimpleGUI happy
gui_OutputMultiplier: Annotated[float, AfterValidator(try_convert_to_float)]
class VRCFTSettingsModule(BaseSettingsModule):
def __init__(self, config, widget_id, **kwargs):
super().__init__(config=config, widget_id=widget_id, **kwargs)
self.validation_model = VRCFTSettingsModuleValidationModel
self.gui_VRCFTModulePort = f"-VRCFTSETTINGSPORTNUMBER{widget_id}"
self.gui_VRCFTModuleIPAddress = f"-VRCFTSETTINGSIPNUMBER{widget_id}"
self.gui_ShouldEmulateEyeWiden = f"-VRCFTSETTINGSEMULATEWIDEN{widget_id}"
self.gui_ShouldEmulateEyeSquint = f"-VRCFTSETTINGSEMULATEEYESQUINT{widget_id}"
self.gui_ShouldEmulateEyebrows = f"-VRCFTSETTINGSEMULATEEYEBROWS{widget_id}"
self.gui_WidenThresholdV1_min = f"-VRCFTSETTINGSWIDENTHRESHOLDV1MIN{widget_id}"
self.gui_WidenThresholdV1_max = f"-VRCFTSETTINGSWIDENTHRESHOLDV1MAX{widget_id}"
self.gui_WidenThresholdV2_min = f"-VRCFTSETTINGSWIDENTHRESHOLDV2MIN{widget_id}"
self.gui_WidenThresholdV2_max = f"-VRCFTSETTINGSWIDENTHRESHOLDV2MAX{widget_id}"
self.gui_SqueezeThresholdV1_min = f"-VRCFTSETTINGSSQUEEZETHRESHOLDV1MIN{widget_id}"
self.gui_SqueezeThresholdV1_max = f"-VRCFTSETTINGSSQUEEZETHRESHOLDV1MAX{widget_id}"
self.gui_SqueezeThresholdV2_min = f"-VRCFTSETTINGSSQUEEZETHRESHOLDV2MIN{widget_id}"
self.gui_SqueezeThresholdV2_max = f"-VRCFTSETTINGSSQUEEZETHRESHOLDV2MAX{widget_id}"
self.gui_EyebrowThresholdRising = f"-VRCFTSETTINGSEYEBROWTHRESHOLDRISING{widget_id}"
self.gui_EyebrowThresholdLowering = f"-VRCFTSETTINGSEYEBROWTHRESHOLDLOWERING{widget_id}"
self.gui_OutputMultiplier = f"-VRCFTSETTINGSOUTPUTMULTIPLIER{widget_id}"
def get_layout(self) -> Iterable:
return [
[
sg.Text("Emulation selection:", background_color="#242224"),
],
[
sg.Checkbox(
"Emulate Eye Widen",
default=self.config.gui_ShouldEmulateEyeWiden,
key=self.gui_ShouldEmulateEyeWiden,
background_color="#424042",
),
sg.Checkbox(
"Emulate Eye Squint",
default=self.config.gui_ShouldEmulateEyeSquint,
key=self.gui_ShouldEmulateEyeSquint,
background_color="#424042",
),
sg.Checkbox(
"Emulate Eyebrows",
default=self.config.gui_ShouldEmulateEyebrows,
key=self.gui_ShouldEmulateEyebrows,
background_color="#424042",
),
],
[
sg.Text("General Module Settings:", background_color="#242224"),
],
[
sg.Text("VRCFT Module listening IP", background_color="#242224"),
sg.InputText(
self.config.gui_VRCFTModuleIPAddress,
key=self.gui_VRCFTModuleIPAddress,
size=(0, 10),
tooltip="Ip on which the module should listen.",
),
sg.Text("port", background_color="#242224"),
sg.InputText(
self.config.gui_VRCFTModulePort,
key=self.gui_VRCFTModulePort,
size=(0, 10),
tooltip="UDP port on which the module should listen.",
),
],
[
sg.Text("VRCFT Module output multiplier", background_color="#242224"),
sg.InputText(
self.config.gui_OutputMultiplier,
key=self.gui_OutputMultiplier,
size=(0, 10),
tooltip="Output multiplier adjusts the output by the given amount",
),
],
[
sg.Text("Eye Widen thresholds:", background_color="#424042"),
],
[
sg.Text("V1 Min:", background_color="#424042"),
sg.Slider(
range=(0, 1),
resolution=0.01,
default_value=self.config.gui_WidenThresholdV1_min,
orientation="h",
key=self.gui_WidenThresholdV1_min,
background_color="#424042",
tooltip="Controls the point at which the emulation should start for v1 params, reacts to openness",
),
sg.Text("V1 Max:", background_color="#424042"),
sg.Slider(
range=(0, 2),
resolution=0.01,
default_value=self.config.gui_WidenThresholdV1_max,
orientation="h",
key=self.gui_WidenThresholdV1_max,
background_color="#424042",
tooltip="Controls the maximum range of widen emulation",
),
],
[
sg.Text("V2 Min:", background_color="#424042"),
sg.Slider(
range=(0, 2),
resolution=0.01,
default_value=self.config.gui_WidenThresholdV2_min,
orientation="h",
key=self.gui_WidenThresholdV2_min,
background_color="#424042",
tooltip="Controls the point at which the emulation should start for v2 params, reacts to openness",
),
sg.Text("V2 Max:", background_color="#424042"),
sg.Slider(
range=(0, 2),
resolution=0.01,
default_value=self.config.gui_WidenThresholdV2_max,
orientation="h",
key=self.gui_WidenThresholdV2_max,
background_color="#424042",
tooltip="Controls the maximum range of widen emulation",
),
],
[
sg.Text("Eye Squeeze thresholds:", background_color="#424042"),
],
[
sg.Text("V1 Min:", background_color="#424042"),
sg.Slider(
range=(0, 1),
resolution=0.01,
default_value=self.config.gui_SqueezeThresholdV1_min,
orientation="h",
key=self.gui_SqueezeThresholdV1_min,
background_color="#424042",
tooltip="Controls the point at which the emulation should start for v1 params, reacts to openness",
),
sg.Text("V1 Max:", background_color="#424042"),
sg.Slider(
range=(0, 2),
resolution=0.01,
default_value=self.config.gui_SqueezeThresholdV1_max,
orientation="h",
key=self.gui_SqueezeThresholdV1_max,
background_color="#424042",
tooltip="Controls the maximum range of squeeze emulation",
),
],
[
sg.Text("V2 Min:", background_color="#424042"),
sg.Slider(
range=(0, 1),
resolution=0.01,
default_value=self.config.gui_SqueezeThresholdV2_min,
orientation="h",
key=self.gui_SqueezeThresholdV2_min,
background_color="#424042",
tooltip="Controls the point at which the emulation should start for v2 params, reacts to openness",
),
sg.Text("V2 Max:", background_color="#424042"),
sg.Slider(
range=(-2, 0),
resolution=0.01,
default_value=self.config.gui_SqueezeThresholdV2_max,
orientation="h",
key=self.gui_SqueezeThresholdV2_max,
background_color="#424042",
tooltip="Controls the maximum range of squeeze emulation",
),
],
[
sg.Text("Eyebrow emulation Thresholds:", background_color="#424042"),
],
[
sg.Text("Rising:", background_color="#424042"),
sg.Slider(
range=(0, 1),
resolution=0.01,
default_value=self.config.gui_EyebrowThresholdRising,
orientation="h",
key=self.gui_EyebrowThresholdRising,
background_color="#424042",
tooltip="Controls the point at which the emulation should start, reacts to openness",
),
sg.Text("Lowering:", background_color="#424042"),
sg.Slider(
range=(0, 2),
resolution=0.01,
default_value=self.config.gui_EyebrowThresholdLowering,
orientation="h",
key=self.gui_EyebrowThresholdLowering,
background_color="#424042",
tooltip="Controls the maximum range of eyebrows emulation",
),
],
]

View File

@ -1,257 +0,0 @@
import PySimpleGUI as sg
from config import EyeTrackSettingsConfig
from threading import Event, Thread
from eye_processor import EyeProcessor, InformationOrigin
from enum import Enum
from queue import Queue, Empty
from camera import Camera, CameraState
import cv2
from osc import EyeId
class SettingsWidget:
def __init__(self, widget_id: EyeId, main_config: EyeTrackSettingsConfig, osc_queue: Queue):
self.gui_flip_x_axis_left = f"-FLIPXAXISLEFT{widget_id}-"
self.gui_flip_x_axis_right = f"-FLIPXAXISRIGHT{widget_id}-"
self.gui_flip_y_axis = f"-FLIPYAXIS{widget_id}-"
self.gui_general_settings_layout = f"-GENERALSETTINGSLAYOUT{widget_id}-"
self.gui_osc_address = f"-OSCADDRESS{widget_id}-"
self.gui_osc_port = f"-OSCPORT{widget_id}-"
self.gui_osc_receiver_port = f"OSCRECEIVERPORT{widget_id}-"
self.gui_osc_recenter_address = f"OSCRECENTERADDRESS{widget_id}-"
self.gui_osc_recalibrate_address = f"OSCRECALIBRATEADDRESS{widget_id}-"
self.gui_blob_fallback = f"-BLOBFALLBACK{widget_id}-"
self.gui_blob_maxsize = f"-BLOBMAXSIZE{widget_id}-"
self.gui_blob_minsize = f"-BLOBMINSIZE{widget_id}-"
self.gui_speed_coefficient = f"-SPEEDCOEFFICIENT{widget_id}-"
self.gui_min_cutoff = f"-MINCUTOFF{widget_id}-"
self.gui_eye_falloff = f"-EYEFALLOFF{widget_id}-"
self.gui_blink_sync = f"-BLINKSYNC{widget_id}-"
self.main_config = main_config
self.config = main_config.settings
self.osc_queue = osc_queue
# Define the window's contents
self.general_settings_layout = [
[
sg.Checkbox(
"Flip Left Eye X Axis",
default=self.config.gui_flip_x_axis_left,
key=self.gui_flip_x_axis_left,
background_color='#424042',
),
sg.Checkbox(
"Flip Right Eye X Axis",
default=self.config.gui_flip_x_axis_right,
key=self.gui_flip_x_axis_right,
background_color='#424042',
),
],
[sg.Checkbox(
"Flip Y Axis",
default=self.config.gui_flip_y_axis,
key=self.gui_flip_y_axis,
background_color='#424042',
),
],
[sg.Checkbox(
"Dual Eye Falloff",
default=self.config.gui_eye_falloff,
key=self.gui_eye_falloff,
background_color='#424042',
),
],
[sg.Checkbox(
"Sync Blinks (disables winking)",
default=self.config.gui_blink_sync,
key=self.gui_blink_sync,
background_color='#424042',
),
],
[
sg.Text("Tracking Algorithim Settings:", background_color='#242224'),
],
[sg.Checkbox(
"Blob Fallback",
default=self.config.gui_blob_fallback,
key=self.gui_blob_fallback,
background_color='#424042',
),
],
[
sg.Text("Min blob size:", background_color='#424042'),
sg.Slider(
range=(1, 50),
default_value=self.config.gui_blob_minsize,
orientation="h",
key=self.gui_blob_minsize,
background_color='#424042'
),
sg.Text("Max blob size:", background_color='#424042'),
sg.Slider(
range=(1, 50),
default_value=self.config.gui_blob_maxsize,
orientation="h",
key=self.gui_blob_maxsize,
background_color='#424042'
),
],
[
sg.Text("Filter Paramaters:", background_color='#242224'),
],
[
sg.Text("Min Frequency Cutoff", background_color='#424042'),
sg.InputText(self.config.gui_min_cutoff, key=self.gui_min_cutoff),
],
[
sg.Text("Speed Coefficient", background_color='#424042'),
sg.InputText(self.config.gui_speed_coefficient, key=self.gui_speed_coefficient),
],
[
sg.Text("OSC Settings:", background_color='#242224'),
],
[
sg.Text("OSC Address:", background_color='#424042'),
sg.InputText(self.config.gui_osc_address, key=self.gui_osc_address),
],
[
sg.Text("OSC Port:", background_color='#424042'),
sg.InputText(self.config.gui_osc_port, key=self.gui_osc_port),
],
[
sg.Text("OSC Receiver Port:", background_color='#424042'),
sg.InputText(self.config.gui_osc_receiver_port, key=self.gui_osc_receiver_port),
],
[
sg.Text("OSC Recenter Address:", background_color='#424042'),
sg.InputText(self.config.gui_osc_recenter_address, key=self.gui_osc_recenter_address),
],
[
sg.Text("OSC Recalibrate Address:", background_color='#424042'),
sg.InputText(self.config.gui_osc_recalibrate_address, key=self.gui_osc_recalibrate_address),
]
]
self.widget_layout = [
[
sg.Text("General Settings:", background_color='#242224'),
],
[
sg.Column(self.general_settings_layout, key=self.gui_general_settings_layout, background_color='#424042' ),
],
# [
# sg.Button(
# "Save Settings", key=self.gui_save_button, button_color = '#6f4ca1'
# ),
#],
]
self.cancellation_event = Event() # Set the event until start is called, otherwise we can block if shutdown is called.
self.cancellation_event.set()
self.image_queue = Queue()
def started(self):
return not self.cancellation_event.is_set()
def start(self):
# If we're already running, bail
if not self.cancellation_event.is_set():
return
self.cancellation_event.clear()
def stop(self):
# If we're not running yet, bail
if self.cancellation_event.is_set():
return
self.cancellation_event.set()
def render(self, window, event, values):
# If anything has changed in our configuration settings, change/update those.
changed = False
if self.config.gui_osc_port != values[self.gui_osc_port]:
try:
int(values[self.gui_osc_port])
if len(values[self.gui_osc_port]) <= 5:
self.config.gui_osc_port = int(values[self.gui_osc_port])
changed = True
else:
print("[ERROR] OSC port value must be an integer 0-65535")
except:
print("[ERROR] OSC port value must be an integer 0-65535")
if self.config.gui_osc_receiver_port != values[self.gui_osc_receiver_port]:
try:
int(values[self.gui_osc_receiver_port])
if len(values[self.gui_osc_receiver_port]) <= 5:
self.config.gui_osc_receiver_port = int(values[self.gui_osc_receiver_port])
changed = True
else:
print("[ERROR] OSC receive port value must be an integer 0-65535")
except:
print("[ERROR] OSC receive port value must be an integer 0-65535")
if self.config.gui_osc_address != values[self.gui_osc_address]:
self.config.gui_osc_address = values[self.gui_osc_address]
changed = True
if self.config.gui_osc_recenter_address != values[self.gui_osc_recenter_address]:
self.config.gui_osc_recenter_address = values[self.gui_osc_recenter_address]
changed = True
if self.config.gui_osc_recalibrate_address != values[self.gui_osc_recalibrate_address]:
self.config.gui_osc_recalibrate_address = values[self.gui_osc_recalibrate_address]
changed = True
if self.config.gui_min_cutoff != values[self.gui_min_cutoff]:
self.config.gui_min_cutoff = values[self.gui_min_cutoff]
changed = True
if self.config.gui_speed_coefficient != values[self.gui_speed_coefficient]:
self.config.gui_speed_coefficient = values[self.gui_speed_coefficient]
changed = True
if self.config.gui_flip_x_axis_right != values[self.gui_flip_x_axis_right]:
self.config.gui_flip_x_axis_right = values[self.gui_flip_x_axis_right]
changed = True
if self.config.gui_flip_x_axis_left != values[self.gui_flip_x_axis_left]:
self.config.gui_flip_x_axis_left = values[self.gui_flip_x_axis_left]
changed = True
if self.config.gui_flip_y_axis != values[self.gui_flip_y_axis]:
self.config.gui_flip_y_axis = values[self.gui_flip_y_axis]
changed = True
if self.config.gui_blob_fallback != values[self.gui_blob_fallback]:
self.config.gui_blob_fallback = values[self.gui_blob_fallback]
changed = True
if self.config.gui_eye_falloff != values[self.gui_eye_falloff]:
self.config.gui_eye_falloff = values[self.gui_eye_falloff]
changed = True
if self.config.gui_blink_sync != values[self.gui_blink_sync]:
self.config.gui_blink_sync = values[self.gui_blink_sync]
changed = True
if self.config.gui_blob_maxsize != values[self.gui_blob_maxsize]:
self.config.gui_blob_maxsize = values[self.gui_blob_maxsize]
changed = True
if changed:
self.main_config.save()
self.osc_queue.put((EyeId.SETTINGS))

View File

@ -0,0 +1,130 @@
# calibration_module.py
import numpy as np
class CalibrationProcessor:
def __init__(self):
self.left_eye_data = None
self.right_eye_data = None
self.P_left = None
self.P_right = None
self.gt_3d = np.array([
(0.8, 0.8, 1), (0, 0.8, 1), (-0.8, 0.8, 1), (0.8, 0, 1), (0, 0, 1),
(-0.8, 0, 1), (0.8, -0.8, 1), (0, -0.8, 1), (-0.8, -0.8, 1)
])
def estimate_projection_matrix(self, eye_data, gt_3d):
# Ensure the input data is a numpy array
eye_data = np.array(eye_data)
gt_3d = np.array(gt_3d)
# Append ones for homogeneous coordinates
gt_3d_h = np.hstack((gt_3d, np.ones((gt_3d.shape[0], 1))))
eye_data_h = np.hstack((eye_data, np.ones((eye_data.shape[0], 1))))
# Debug: Print the shapes of the matrices
print("Shape of gt_3d_h:", gt_3d_h.shape)
print("Shape of eye_data_h:", eye_data_h.shape)
# Solve for the projection matrix using least squares
P, _, _, _ = np.linalg.lstsq(gt_3d_h, eye_data_h, rcond=None)
return P
def receive_calibration_data(self, eye_id, data):
if eye_id == 1:
self.left_eye_data = data
elif eye_id == 0:
self.right_eye_data = data
# print('receive',len(self.left_eye_data), self.left_eye_data, self.right_eye_data, data, eye_id)
# Check if both sets of data have been received
if self.left_eye_data is not None and self.right_eye_data is not None:
if len(self.left_eye_data) == 8 and len(self.right_eye_data) == 8:
self.process_calibration_data()
def process_calibration_data(self):
# Ensure both data are present
if self.left_eye_data is None or self.right_eye_data is None:
raise ValueError("Calibration data for both eyes must be provided")
print("Processing calibration data for both eyes...")
print(f"Left Eye Data: {self.left_eye_data}")
print(f"Right Eye Data: {self.right_eye_data}")
self.left_eye_data = np.array(self.left_eye_data)
self.right_eye_data = np.array(self.right_eye_data)
if len(self.left_eye_data) != len(self.gt_3d):
raise ValueError(
f"Number of left eye points ({len(self.left_eye_data)}) does not match number of 3D points ({len(self.gt_3d)}).")
if len(self.right_eye_data) != len(self.gt_3d):
raise ValueError(
f"Number of right eye points ({len(self.right_eye_data)}) does not match number of 3D points ({len(self.gt_3d)}).")
# After processing, reset the data
# self.left_eye_data = None
# self.right_eye_data = None
# Function to compute the 3D gaze direction from 2D points
def compute_gaze_direction(self, P, point_2d):
print(P, point_2d)
# Convert 2D point to homogeneous coordinates
point_2d_h = np.append(point_2d, 1)
# Solve for 3D direction (Ax = b, where A is the projection matrix and b is the 2D point)
direction, _, _, _ = np.linalg.lstsq(P[:, :-1], point_2d_h, rcond=None)
direction /= np.linalg.norm(direction)
return direction
# Compute the convergence point given 2D points for both eyes
def compute_convergence_point(self, left_point_2d, right_point_2d, P_left, P_right, IPD):
left_eye_pos = np.array([-IPD / 2, 0, 0])
right_eye_pos = np.array([IPD / 2, 0, 0])
gaze_left = self.compute_gaze_direction(P_left, left_point_2d)
gaze_right = self.compute_gaze_direction(P_right, right_point_2d)
# Parameterize the gaze directions as lines
def line_parametric_form(point, direction, t):
return point + t * direction
# Find the closest point between two lines
t_values = np.linspace(-10, 10, 1000)
min_distance = float('inf')
best_point = None
for t1 in t_values:
for t2 in t_values:
point1 = line_parametric_form(left_eye_pos, gaze_left, t1)
point2 = line_parametric_form(right_eye_pos, gaze_right, t2)
distance = np.linalg.norm(point1 - point2)
if distance < min_distance:
min_distance = distance
best_point = (point1 + point2) / 2
return best_point
def set_P(self):
self.P_left = self.estimate_projection_matrix(self.left_eye_data, self.gt_3d)
self.P_right = self.estimate_projection_matrix(self.right_eye_data, self.gt_3d)
# Global instance of CalibrationProcessor
calibration_processor = CalibrationProcessor()
def receive_calibration_data(data, eye_id):
global calibration_processor
calibration_processor.receive_calibration_data(eye_id, data)
def converge_3d():
IPD = 0.058
left_point_2d = (120, 100)
right_point_2d = (118, 65)
# estimate_projection_matrix
calibration_processor.set_P()
convergence_point = calibration_processor.compute_convergence_point(left_point_2d, right_point_2d, calibration_processor.P_left, calibration_processor.P_right, IPD)
print(f"Convergence Point: {convergence_point}")

View File

@ -0,0 +1,45 @@
import numpy as np
from eye import EyeId
def velocity_falloff(self, var, out_x, out_y):
if (
self.settings.gui_right_eye_dominant
or self.settings.gui_left_eye_dominant
or self.settings.gui_outer_side_falloff
):
# Calculate the distance between the two eyes
dist = np.sqrt(np.square(var.l_eye_x - var.r_eye_x) + np.square(var.left_y - var.right_y))
if self.eye_id == EyeId.LEFT:
var.l_eye_x = out_x
var.left_y = out_y
if self.eye_id == EyeId.RIGHT:
var.r_eye_x = out_x
var.right_y = out_y
# Check if the distance is greater than the threshold
if dist > self.settings.gui_eye_dominant_diff_thresh:
if self.settings.gui_right_eye_dominant:
out_x, out_y = var.r_eye_x, var.right_y
elif self.settings.gui_left_eye_dominant:
out_x, out_y = var.l_eye_x, var.left_y
else:
# If the distance is too large, identify the eye with the lower velocity
if var.l_eye_velocity < var.r_eye_velocity:
# Mirror the position of the eye with lower velocity to the other eye
out_x, out_y = var.r_eye_x, var.right_y
else:
# Mirror the position of the eye with lower velocity to the other eye
out_x, out_y = var.l_eye_x, var.left_y
else:
# If the distance is within the threshold, do not mirror the eyes
pass
else:
pass
return out_x, out_y
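A minimal usage sketch for `velocity_falloff`, using stand-in objects for the tracker and its shared state; all values are dummies chosen so the dominant-eye branch fires, and it assumes `velocity_falloff` and `EyeId` from this module are in scope:

```python
# Illustrative only: exercising velocity_falloff with stand-in objects.
from types import SimpleNamespace

settings = SimpleNamespace(
    gui_right_eye_dominant=True,   # force the right eye when the eyes disagree
    gui_left_eye_dominant=False,
    gui_outer_side_falloff=False,
    gui_eye_dominant_diff_thresh=0.3,
)
tracker = SimpleNamespace(settings=settings, eye_id=EyeId.LEFT)
var = SimpleNamespace(
    l_eye_x=0.0, left_y=0.0, r_eye_x=0.9, right_y=0.0,  # eyes 0.9 apart > 0.3
    l_eye_velocity=0.0, r_eye_velocity=0.0,
)

# The left eye reports (0.0, 0.0); the gap exceeds the threshold, so the
# dominant right eye's position (0.9, 0.0) is returned instead.
print(velocity_falloff(tracker, var, 0.0, 0.0))  # -> (0.9, 0.0)
```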

View File

@ -0,0 +1,47 @@
import cv2
import numpy as np
def safe_crop(img, x, y, x2, y2, keepsize=False):
try:
# The order of the arguments can be reconsidered.
img_h, img_w = img.shape[:2]
outimg = img[max(0, y) : min(img_h, y2), max(0, x) : min(img_w, x2)].copy()
reqsize_x, reqsize_y = abs(x2 - x), abs(y2 - y)
if keepsize and outimg.shape[:2] != (reqsize_y, reqsize_x):
# If the size is different from the expected size (smaller by the amount that is out of range)
outimg = cv2.resize(outimg, (reqsize_x, reqsize_y))
return outimg
except cv2.error as e:
if '!ssize.empty()' in str(e):
print("Image is None or has zero dimensions. Skipping resizing.")
else:
raise
def circle_crop(img, xc, yc, radius, cct):
avg_color_per_row = np.average(img, axis=0)
avg_color = np.average(avg_color_per_row, axis=0)
if cct <= 0:
try:
ht, wd = img.shape[:2]
if radius < 10:  # minimum size
radius = 10
# draw filled circle in white on black background as mask
mask = np.zeros((ht, wd), dtype=np.uint8)
mask = cv2.circle(mask, (xc, yc), radius, 255, -1)
# create white colored background
color = np.full_like(img, (avg_color))
# apply mask to image
masked_img = cv2.bitwise_and(img, img, mask=mask)
# apply inverse mask to colored image
masked_color = cv2.bitwise_and(color, color, mask=255 - mask)
# combine the two masked images
outimg = cv2.add(masked_img, masked_color)
return outimg, cct
except Exception:
return img, cct
else:
cct = cct - 1
return img, cct
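A small usage sketch for `safe_crop` on a dummy image; it assumes the function above is in scope:

```python
# Illustrative only: safe_crop clamps out-of-range coordinates, and
# keepsize=True stretches the clamped crop back to the requested size.
import numpy as np

img = np.zeros((100, 100, 3), dtype=np.uint8)

crop = safe_crop(img, 80, 80, 140, 140)                 # clamped to the frame
print(crop.shape)                                       # (20, 20, 3)

crop = safe_crop(img, 80, 80, 140, 140, keepsize=True)  # resized to 60x60
print(crop.shape)                                       # (60, 60, 3)
```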

View File

@ -0,0 +1,82 @@
import os
import typing
import sys
from pathlib import Path
from typing import Union
is_nt = os.name == "nt"
def PlaySound(*args, **kwargs):
pass
SND_FILENAME = SND_ASYNC = 1
if is_nt:
import winsound
PlaySound = winsound.PlaySound
SND_FILENAME = winsound.SND_FILENAME
SND_ASYNC = winsound.SND_ASYNC
def clamp(x, low, high):
return max(low, min(x, high))
def lst_median(lst, ordered=False):
# https://github.com/emilianavt/OpenSeeFace/blob/6f24efc4f58eb7cca47ec2146d934eabcc207e46/remedian.py
assert lst, "median needs a non-empty list"
n = len(lst)
p = q = n // 2
if n < 3:
p, q = 0, n - 1
else:
lst = lst if ordered else sorted(lst)
if not n % 2: # for even-length lists, use mean of mid 2 nums
q = p - 1
return lst[p] if p == q else (lst[p] + lst[q]) / 2
class FastMedian:
# https://github.com/emilianavt/OpenSeeFace/blob/6f24efc4f58eb7cca47ec2146d934eabcc207e46/remedian.py
# Initialization
def __init__(self, inits: typing.Optional[typing.Sequence] = None, k=64):  # after some experimentation, 64 works ok
self.all, self.k = [], k
self.more, self.__median = None, None
if inits is not None:
for x in inits:
self + x
# When full, push the median of current values to next list, then reset.
def __add__(self, x):
self.__median = None
self.all.append(x) # It would be faster to pre-allocate an array and assign it by index.
if len(self.all) == self.k:
self.more = self.more or FastMedian(k=self.k)
self.more + self.__medianPrim(self.all)
# It's going to be slower because of the re-allocation.
self.all = [] # reset
# If there is a next list, ask its median. Else, work it out locally.
def median(self):
return self.more.median() if self.more else self.__medianPrim(self.all)
# Only recompute median if we do not know it already.
def __medianPrim(self, all):
if self.__median is None:
self.__median = lst_median(all, ordered=False)
return self.__median
def resource_path(relative_path: Union[str, Path]) -> str:
"""
Get absolute path to resource, works for dev and for PyInstaller
"""
try:
# PyInstaller creates a temp folder and stores path in _MEIPASS
base_path = Path(sys._MEIPASS)
except AttributeError:
base_path = Path(".")
return str(base_path / relative_path)
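A hedged usage sketch for `FastMedian` and `format_time` from this file; the numbers are illustrative:

```python
# Illustrative only: FastMedian keeps an approximate running median
# without storing the whole stream (the "remedian" scheme noted above).
import random

m = FastMedian(k=64)
for _ in range(10_000):
    m + random.gauss(0.0, 1.0)  # __add__ is the insert operation
print(m.median())               # approximately 0.0 for this distribution

# format_time picks a sensible unit and scale for a timespan:
print(format_time(0.000123))    # e.g. "123 µs" ("us" if stdout lacks unicode)
```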

View File

@ -0,0 +1,171 @@
import functools
import math
import sys
import timeit
def TimeitWrapper(*args, **kwargs):
"""
This decorator @TimeitWrapper() prints the function name and execution time in seconds.
:param args:
:param kwargs:
:return:
"""
def decorator(function):
@functools.wraps(function)
def wrapper(*args, **kwargs):
start = timeit.default_timer()
results = function(*args, **kwargs)
end = timeit.default_timer()
print('{} execution time: {:.10f} s'.format(function.__name__, end - start))
return results
return wrapper
return decorator
class TimeitResult(object):
"""
from https://github.com/ipython/ipython/blob/339c0d510a1f3cb2158dd8c6e7f4ac89aa4c89d8/IPython/core/magics/execution.py#L55
Object returned by the timeit magic with info about the run.
Contains the following attributes :
loops: (int) number of loops done per measurement
repeat: (int) number of times the measurement has been repeated
best: (float) best execution time / number
all_runs: (list of float) execution time of each run (in s)
"""
def __init__(self, loops, repeat, best, worst, all_runs, precision):
self.loops = loops
self.repeat = repeat
self.best = best
self.worst = worst
self.all_runs = all_runs
self._precision = precision
self.timings = [dt / self.loops for dt in all_runs]
@property
def average(self):
return math.fsum(self.timings) / len(self.timings)
@property
def stdev(self):
mean = self.average
return (math.fsum([(x - mean) ** 2 for x in self.timings]) / len(self.timings)) ** 0.5
def __str__(self):
pm = '+-'
if hasattr(sys.stdout, 'encoding') and sys.stdout.encoding:
try:
u'\xb1'.encode(sys.stdout.encoding)
pm = u'\xb1'
except:
pass
return "min:{best} max:{worst} mean:{mean} {pm} {std} per loop (mean {pm} std. dev. of {runs} run{run_plural}, {loops:,} loop{loop_plural} each)".format(
pm=pm,
runs=self.repeat,
loops=self.loops,
loop_plural="" if self.loops == 1 else "s",
run_plural="" if self.repeat == 1 else "s",
mean=format_time(self.average, self._precision),
std=format_time(self.stdev, self._precision),
best=format_time(self.best, self._precision),
worst=format_time(self.worst, self._precision),
)
def _repr_pretty_(self, p, cycle):
unic = self.__str__()
p.text(u'<TimeitResult : ' + unic + u'>')
class FPSResult(object):
"""
base https://github.com/ipython/ipython/blob/339c0d510a1f3cb2158dd8c6e7f4ac89aa4c89d8/IPython/core/magics/execution.py#L55
"""
def __init__(self, loops, repeat, best, worst, all_runs, precision):
self.loops = loops
self.repeat = repeat
self.best = 1 / best
self.worst = 1 / worst
self.all_runs = all_runs
self._precision = precision
self.fps = [1 / dt for dt in all_runs]
self.unit = "fps"
@property
def average(self):
return math.fsum(self.fps) / len(self.fps)
@property
def stdev(self):
mean = self.average
return (math.fsum([(x - mean) ** 2 for x in self.fps]) / len(self.fps)) ** 0.5
def __str__(self):
pm = '+-'
if hasattr(sys.stdout, 'encoding') and sys.stdout.encoding:
try:
u'\xb1'.encode(sys.stdout.encoding)
pm = u'\xb1'
except:
pass
return "min:{best} max:{worst} mean:{mean} {pm} {std} per loop (mean {pm} std. dev. of {runs} run{run_plural}, {loops:,} loop{loop_plural} each)".format(
pm=pm,
runs=self.repeat,
loops=self.loops,
loop_plural="" if self.loops == 1 else "s",
run_plural="" if self.repeat == 1 else "s",
mean="%.*g%s" % (self._precision, self.average, self.unit),
std="%.*g%s" % (self._precision, self.stdev, self.unit),
best="%.*g%s" % (self._precision, self.best, self.unit),
worst="%.*g%s" % (self._precision, self.worst, self.unit),
)
def _repr_pretty_(self, p, cycle):
unic = self.__str__()
p.text(u'<FPSResult : ' + unic + u'>')
def format_time(timespan, precision=3):
"""
https://github.com/ipython/ipython/blob/339c0d510a1f3cb2158dd8c6e7f4ac89aa4c89d8/IPython/core/magics/execution.py#L1473
Formats the timespan in a human readable form
"""
if timespan >= 60.0:
# we have more than a minute, format that in a human readable form
# Idea from http://snipplr.com/view/5713/
parts = [("d", 60 * 60 * 24), ("h", 60 * 60), ("min", 60), ("s", 1)]
time = []
leftover = timespan
for suffix, length in parts:
value = int(leftover / length)
if value > 0:
leftover = leftover % length
time.append(u'%s%s' % (str(value), suffix))
if leftover < 1:
break
return " ".join(time)
# Unfortunately the unicode 'micro' symbol can cause problems in
# certain terminals.
# See bug: https://bugs.launchpad.net/ipython/+bug/348466
# Try to prevent crashes by being more secure than it needs to
# E.g. eclipse is able to print a µ, but has no sys.stdout.encoding set.
units = [u"s", u"ms", u'us', "ns"] # the save value
if hasattr(sys.stdout, 'encoding') and sys.stdout.encoding:
try:
u'\xb5'.encode(sys.stdout.encoding)
units = [u"s", u"ms", u'\xb5s', "ns"]
except:
pass
scaling = [1, 1e3, 1e6, 1e9]
if timespan > 0.0:
order = min(-int(math.floor(math.log10(timespan)) // 3), 3)
else:
order = 3
return u"%.*g %s" % (precision, timespan * scaling[order], units[order])

View File

@ -261,4 +261,4 @@ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
(2) All disclaimers, limitations of liability, and indemnification
obligations survive termination.
END OF TERMS AND CONDITIONS

19
Makefile Normal file
View File

@ -0,0 +1,19 @@
.DEFAULT_GOAL := run
install:
poetry install
run:
cd EyeTrackApp/ && poetry run python eyetrackapp.py
pyinstaller:
poetry run pyinstaller EyeTrackApp/eyetrackapp.spec EyeTrackApp/eyetrackapp.py
clean:
rm -rf build/
rm -rf dist/
rm -rf EyeTrackApp/__pycache__/
rm -rf EyeTrackApp/app/__pycache__/
rm -rf EyeTrackApp/app/algorithms/__pycache__/
rm EyeTrackApp/eyetrack_settings.backup
rm EyeTrackApp/eyetrack_settings.json

View File

@ -13,38 +13,30 @@ Picture courtesy of Wackalope#6737
- [Contact](#contact)
- [Licenses](#licenses)
-Active Development: [V2 App Branch](https://github.com/EyeTrackVR/EyeTrackVR/tree/v2.0-beta-feature-branch)
-### [Documentation](https://docs.eyetrackvr.dev/)
+### [Documentation](https://redhawk989.github.io/EyeTrackVR/)
# EyeTrackVR
-Source First and *affordable* VR eye tracker platform for [VRChat](https://hello.vrchat.com/) via `OSC` and `UDP` protocol.
+Open source and *affordable* VR eye tracker platform for [VRChat](https://hello.vrchat.com/) via `OSC` and `UDP` protocol.
-> **Note**: This project is in development and is not fully finished
+> [!IMPORTANT]\
+> This project is in active development and is not fully finished
## Documentation
-Please check out our [documentation site.](https://docs.eyetrackvr.dev/)
+Please check out our [documentation site](https://redhawk989.github.io/EyeTrackVR/)
## Hardware
-3d files for mounting brackets can be found [here](https://github.com/RedHawk989/EyeTrackVR-Hardware)
+3d files for mounting brackets will be found [here](https://github.com/RedHawk989/EyeTrackVR-Hardware)
-IR emitter PCB files are also located there, along with pre-soldered PCBs on the [official store](https://store.eyetrackvr.dev/). For more info, please reference our [documentation site](https://docs.eyetrackvr.dev/how_to_build/parts_list)
+IR emitter files are also located there. For more info please reference our parts list on our [documentation site](https://redhawk989.github.io/EyeTrackVR/parts-list/)
-## ESP-CAM Firmware
-Current work has been with our official firmware by `lorow`, found [here](https://github.com/lorow/OpenIris).
+## ESP32 Cam Firmware
+Current work has been with our official firmware by `lorow` and `ZanzyTHEbar`, found [here](https://github.com/EyeTrackVR/OpenIris).
## Headset support
-Pretty much any headset that can fit the camera and LEDs can be supported. However, mounts may not have been made for it. Please reference our [parts list](https://docs.eyetrackvr.dev/how_to_build/parts_list#_3d-printed-mounts) for linked mounts and [create your own mount page](https://docs.eyetrackvr.dev/how_to_build/creating_your_own_mount) for details.
+Any headset that can fit the camera and LEDs can likely be supported, however, mounts may not have been made for it. Please reference to our [parts list](https://redhawk989.github.io/EyeTrackVR/parts-list/#other-parts) for details.
## About IR Emitter Safety
@ -55,17 +47,14 @@ Please *exercise extreme caution* when messing around with IR emitters.
The safety measures were put in place to REDUCE the potential failure risk. All further safety responsibilities are on the user.
This includes visually checking with an IR camera that the brightness is correct and making sure you do not feel warmth.
-> **Warning**: Make sure you are using **NON-focused** emitters and at power less than ***5mW cm^2 total per eye***.
-Please read our LED safety page for a breakdown of math for our V3 and V4 LED kits [here](https://docs.eyetrackvr.dev/getting_started/led_safety)
+> [!WARNING]\
+> Make sure you are using **NON-focused** emitters and at around ***5ma total power per eye***.
-[Effect of infrared radiation on the lens](https://docs.eyetrackvr.dev/safety/effect_of_ir_on_the_lens.pdf)
-[Training-library Nir Stds](https://docs.eyetrackvr.dev/safety/training-library_nir_stds_20021011.pdf)
-[AN002_Details on photobiological safety of LED light sources](https://docs.eyetrackvr.dev/safety/AN002_Details_on_photobiological_safety_of_LED_light_sources.pdf)
+[Effect of infrared radiation on the lens](https://github.com/RedHawk989/EyeTrackVR/blob/docs/docs/Reference_Docs/saftey/effect_of_ir_on_the_lens.pdf)
+[Training-library Nir Stds](https://github.com/RedHawk989/EyeTrackVR/blob/docs/docs/Reference_Docs/saftey/training-library_nir_stds_20021011.pdf)
+[AN002_Details on photobiological safety of LED light sources](https://github.com/RedHawk989/EyeTrackVR/blob/docs/docs/Reference_Docs/saftey/AN002_Details_on_photobiological_safety_of_LED_light_sources.pdf)
## Contact
@ -75,9 +64,8 @@ Please join our Discord for updates and any questions.
## Licenses
-[![GitHub license](https://img.shields.io/github/license/RedHawk989/EyeTrackVR?style=plastic)](https://github.com/RedHawk989/EyeTrackVR/blob/master/LICENSE)
-***All software is licensed under the Babble Software Distribution License 1.0 unless marked otherwise.
+***All software is licensed under the Babble Software Distribution License 1.0 unless marked otherwise.***
All documentation, including the [Wiki](https://github.com/RedHawk989/EyeTrackVR/wiki), is under the Creative Commons CC-BY-SA-4.0 license***.
<!-- <div align="center">

125
conftest.py Normal file
View File

@ -0,0 +1,125 @@
import pytest
from config import (
EyeTrackConfig,
EyeTrackCameraConfig,
EyeTrackSettingsConfig,
)
@pytest.fixture()
def eyetrack_settings_config():
return EyeTrackSettingsConfig(
gui_flip_x_axis_left=False,
gui_flip_x_axis_right=False,
gui_flip_y_axis=False,
gui_RANSAC3D=False,
gui_HSF=False,
gui_BLOB=False,
gui_BLINK=False,
gui_HSRAC=False,
gui_AHSFRAC=False,
gui_AHSF=False,
gui_DADDY=False,
gui_LEAP=True,
gui_HSF_radius=15,
gui_HSF_radius_left=10,
gui_HSF_radius_right=10,
gui_min_cutoff="0.0004",
gui_speed_coefficient="0.9",
gui_osc_address="127.0.0.1",
gui_osc_port=8889,
gui_osc_receiver_port=9001,
gui_osc_recenter_address="/avatar/parameters/etvr_recenter",
gui_osc_recalibrate_address="/avatar/parameters/etvr_recalibrate",
gui_blob_maxsize=25.0,
gui_blob_minsize=10.0,
gui_recenter_eyes=False,
tracker_single_eye=2,
gui_threshold=65,
gui_AHSFRACP=1,
gui_AHSFP=2,
gui_HSRACP=3,
gui_HSFP=4,
gui_DADDYP=5,
gui_RANSAC3DP=6,
gui_BLOBP=7,
gui_LEAPP=8,
gui_IBO=True,
gui_skip_autoradius=False,
gui_thresh_add=11,
gui_update_check=False,
gui_ROSC=False,
gui_circular_crop_right=False,
gui_circular_crop_left=False,
ibo_filter_samples=400,
ibo_average_output_samples=0,
ibo_fully_close_eye_threshold=0.3,
calibration_samples=600,
osc_right_eye_close_address="/avatar/parameters/RightEyeLidExpandedSqueeze",
osc_left_eye_close_address="/avatar/parameters/LeftEyeLidExpandedSqueeze",
osc_left_eye_x_address="/avatar/parameters/LeftEyeX",
osc_right_eye_x_address="/avatar/parameters/RightEyeX",
osc_eyes_y_address="/avatar/parameters/EyesY",
osc_invert_eye_close=False,
gui_RANSACBLINK=False,
gui_right_eye_dominant=False,
gui_left_eye_dominant=False,
gui_outer_side_falloff=False,
gui_eye_dominant_diff_thresh=0.3,
gui_legacy_ransac=False,
gui_legacy_ransac_thresh_right=80,
gui_legacy_ransac_thresh_left=80,
gui_LEAP_lid=False,
gui_osc_vrcft_v1=False,
gui_osc_vrcft_v2=False,
gui_vrc_native=False,
gui_pupil_dilation=True,
)
@pytest.fixture()
def eyetrack_camera_config():
return EyeTrackCameraConfig(
rotation_angle=250,
roi_window_x=67,
roi_window_y=27,
roi_window_w=96,
roi_window_h=117,
focal_length=30,
capture_source="http://192.168.0.31/",
calib_XMAX=122.5,
calib_XMIN=38.0,
calib_YMAX=118.0,
calib_YMIN=6.0,
calib_XOFF=40.0,
calib_YOFF=63.0,
calibration_points=[],
)
@pytest.fixture()
def main_config(eyetrack_camera_config, eyetrack_settings_config):
return EyeTrackConfig(
right_eye=eyetrack_camera_config,
left_eye=eyetrack_camera_config,
settings=eyetrack_settings_config,
eye_display_id=0,
)
@pytest.fixture()
def main_config_v1_params(main_config):
main_config.settings.gui_osc_vrcft_v1 = True
return main_config
@pytest.fixture()
def main_config_v2_params(main_config):
main_config.settings.gui_osc_vrcft_v2 = True
return main_config
@pytest.fixture()
def main_config_native_params(main_config):
main_config.settings.gui_vrc_native = True
return main_config
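A hedged sketch of a test consuming these fixtures; the test file name and assertions are illustrative, not part of the commit (pytest discovers `conftest.py` at the repo root, and `pythonpath = "."` in pyproject.toml makes the app modules importable):

```python
# test_config_example.py: illustrative only; shows how the fixtures compose.
def test_v1_params_enable_exactly_one_output(main_config_v1_params):
    settings = main_config_v1_params.settings
    assert settings.gui_osc_vrcft_v1 is True
    assert settings.gui_osc_vrcft_v2 is False
    assert settings.gui_vrc_native is False

def test_camera_roi_is_within_frame(main_config):
    cam = main_config.right_eye
    assert cam.roi_window_w > 0 and cam.roi_window_h > 0
```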

10
docker-compose.yml Normal file
View File

@ -0,0 +1,10 @@
version: '3.4'
services:
etvrtrackingbackend:
image: eyetrackvrapp
build:
context: .
dockerfile: ./Dockerfile
ports:
- 8000:8000

1778
poetry.lock generated

File diff suppressed because it is too large

pyproject.toml
View File

@ -1,33 +1,49 @@
 [tool.poetry]
 name = "EyeTrackVR"
-version = "0.1.7.2"
+version = "0.2"
 description = "Opensource, affordable VR eye tracker for VRChat"
 authors = ["RedHawk989"]
-license = "MIT"
+license = "LICENSE: Babble Software Distribution License 1.0"
-repository = "https://github.com/RedHawk989/EyeTrackVR"
+repository = "https://github.com/EyeTrackVR/EyeTrackVR"
 [tool.poetry.dependencies]
 python = "~3.11.0"
 python-osc = "^1.8.0"
-opencv-python = "^4.6.0.66"
-numpy = "^1.23.4"
-pye3d = "^0.3.1.post1"
 requests = "^2.28.1"
+opencv-python = "^4.6.0.66"
+numpy = "~1.23.5"
+pye3d = "^0.3.2"
 pysimplegui-4-foss = "^4.6.4.1"
-pydantic = "^1.10.2"
+pydantic = "^2.4.2"
-win10toast_click = "^0.1.2"
+scikit-image = "*"
-beautifulsoup4 = "^4.11.1"
+pyserial = "^3.5"
+winotify = [
+    { version = "^1.1.0", platform = 'win32' }
+]
+onnxruntime = "^1.13.1"
+colorama = "^0.4.6"
+taskipy = "^1.10.4"
+pytest = "^8.0.0"
+pytest-cov = "^4.1.0"
 [tool.poetry.group.dev.dependencies]
 black = "^22.10.0"
 pyinstaller = "^5.6.2"
 flake8 = "^5.0.4"
+[tool.taskipy.tasks]
+dev = "python eyetrackapp.py"
+[tool.black]
+line-length = 120
 [build-system]
 requires = ["poetry-core>=1.0.0"]
 build-backend = "poetry.core.masonry.api"
-[tool.black]
-line-length = 135
-exclude = "(.git|.env|venv)"
-target-version = ["py310","py311"]
+[tool.pytest.ini_options]
+addopts = "-ra -q"
+pythonpath = "."
+python_files = [
+    "test_*.py"
+]
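
A few practical consequences of this manifest, under the usual Poetry conventions: `poetry install` resolves the dependencies above, `poetry run task dev` launches the app via the taskipy `dev` task, and `poetry run pytest` picks up the `[tool.pytest.ini_options]` block, where `pythonpath = "."` adds the repository root to the import path for the test run.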

ETVR_SETUP.iss
View File

@ -0,0 +1,53 @@
; Script generated by the Inno Setup Script Wizard.
; SEE THE DOCUMENTATION FOR DETAILS ON CREATING INNO SETUP SCRIPT FILES!
#define MyAppName "EyeTrackVR"
#define MyAppVersion "0.2 BETA 10"
#define MyAppPublisher "EyeTrackVR"
#define MyAppURL "https://redhawk989.github.io/EyeTrackVR/"
#define MyAppExeName "eyetrackapp.exe"
[Setup]
; NOTE: The value of AppId uniquely identifies this application. Do not use the same AppId value in installers for other applications.
; (To generate a new GUID, click Tools | Generate GUID inside the IDE.)
AppId={{60B32C3C-819B-4685-AF40-49AB93539405}
AppName={#MyAppName}
AppVersion={#MyAppVersion}
;AppVerName={#MyAppName} {#MyAppVersion}
AppPublisher={#MyAppPublisher}
AppPublisherURL={#MyAppURL}
AppSupportURL={#MyAppURL}
AppUpdatesURL={#MyAppURL}
DefaultDirName={autopf}\{#MyAppName}
DisableProgramGroupPage=yes
OutputDir=C:\Users\Prohurtz\Desktop\Output\
; Uncomment the following line to run in non administrative install mode (install for current user only.)
;PrivilegesRequired=lowest
OutputBaseFilename=EyeTrackVR-Setup
SetupIconFile=C:\Users\Prohurtz\PycharmProjects\EyeTrackVR\EyeTrackApp\Images\logo.ico
Compression=lzma/ultra64
SolidCompression=yes
WizardStyle=modern
[Languages]
Name: "english"; MessagesFile: "compiler:Default.isl"
[Tasks]
Name: "desktopicon"; Description: "{cm:CreateDesktopIcon}"; GroupDescription: "{cm:AdditionalIcons}";
[Files]
Source: "C:\Users\Prohurtz\PycharmProjects\EyeTrackVR\EyeTrackApp\dist\{#MyAppExeName}"; DestDir: "{app}"; Flags: ignoreversion
Source: "C:\Users\Prohurtz\PycharmProjects\EyeTrackVR\EyeTrackApp\dist\*"; DestDir: "{app}"; Flags: ignoreversion recursesubdirs createallsubdirs
; NOTE: Don't use "Flags: ignoreversion" on any shared system files
[Dirs]
Name: {app}; Permissions: users-full
[Icons]
Name: "{autoprograms}\{#MyAppName}"; Filename: "{app}\{#MyAppExeName}"
Name: "{autodesktop}\{#MyAppName}"; Filename: "{app}\{#MyAppExeName}"; Tasks: desktopicon
[Run]
Filename: "{app}\{#MyAppExeName}"; Description: "{cm:LaunchProgram,{#StringChange(MyAppName, '&', '&&')}}"; Flags: nowait postinstall skipifsilent
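
Assuming a stock Inno Setup 6 install, compiling this script (in the IDE or with `ISCC` on the command line) should emit `EyeTrackVR-Setup.exe` into the `OutputDir` above, bundling everything PyInstaller placed in the `dist` folder.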

View File

@ -0,0 +1,14 @@
:: Example script to auto build the app and make an installer.
:: File paths will all need to be updated to your setup.
cd C:\Users\beaul\PycharmProjects\EyeTrackVR\EyeTrackApp
pyinstaller eyetrackapp.spec --noconfirm
cd "C:\Program Files (x86)\Inno Setup 6"
ISCC C:\Users\beaul\OneDrive\Desktop\ETVR_SETUP.iss
cls
@echo off
color 0A
echo -------------------------------
echo ############ DONE #############
echo -------------------------------
PAUSE

24
scripts/setup.sh Normal file
View File

@ -0,0 +1,24 @@
#!/bin/sh
# This script sets up the environment for the build process.
# Check if make is installed and, if not, install it along with the
# native libraries OpenCV needs at runtime.
if ! command -v make >/dev/null 2>&1
then
    echo "make could not be found, installing it now"
    apt-get update
    apt-get install -y ffmpeg libsm6 libxext6
    apt-get install -y libgl1-mesa-glx
    apt-get install -y --no-install-recommends build-essential
    apt-get install -y make
fi
# Check if poetry is installed and, if not, install it via pip.
if ! command -v poetry >/dev/null 2>&1
then
    echo "poetry could not be found, installing it now"
    apt-get install -y --no-install-recommends python3-pip
    pip3 install poetry
fi
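
The script assumes a Debian-based image and root privileges (as in the dev container), since it calls apt-get directly; on a workstation it would typically be run as `sudo sh scripts/setup.sh`.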

20
tests/__init__.py Normal file
View File

@ -0,0 +1,20 @@
import dataclasses
@dataclasses.dataclass
class EyeInfoMock:
x: int
y: int
blink: float
pupil_dilation: float
avg_velocity: float
class SimpleUDPClientMock:
def __init__(self, osc_address, port):
self.osc_address = osc_address
self.port = port
self.messages = []
def send_message(self, address, value):
self.messages.append((address, value))
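
As a quick illustration of the mock in isolation (a hypothetical snippet, not part of the suite): send_message only records what it is given, so a test can assert on the captured traffic afterwards.

client = SimpleUDPClientMock("127.0.0.1", 8889)
client.send_message("/avatar/parameters/LeftEyeX", 0.5)
assert client.messages == [("/avatar/parameters/LeftEyeX", 0.5)]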

269
tests/test_osc_native_params.py Normal file
View File

@ -0,0 +1,269 @@
from queue import Queue
from time import sleep
from unittest import mock
import pytest
from osc.osc import OSCManager, OSCMessage
from osc.OSCMessage import OSCMessageType
from tests import EyeInfoMock, SimpleUDPClientMock
@pytest.mark.parametrize(
"messages,expected_outcome",
[
(
[
OSCMessage(
type=OSCMessageType.EYE_INFO,
data=(
0,
EyeInfoMock(
x=0,
y=0,
blink=1,
pupil_dilation=0,
avg_velocity=0,
),
),
),
],
[
("/tracking/eye/EyesClosedAmount", 0.0),
("/tracking/eye/LeftRightVec", [0.0, 0.0, 1.0, 0.0, 0.0, 1.0]),
],
),
(
[
OSCMessage(
type=OSCMessageType.EYE_INFO,
data=(
1,
EyeInfoMock(
x=0,
y=0,
blink=1,
pupil_dilation=0,
avg_velocity=0,
),
),
),
],
[
("/tracking/eye/EyesClosedAmount", 0.0),
("/tracking/eye/LeftRightVec", [0.0, 0.0, 1.0, 0.0, 0.0, 1.0]),
],
),
],
)
def test_send_command_native_params_single_eye(main_config_native_params, messages, expected_outcome):
with mock.patch("EyeTrackApp.osc.osc.udp_client.SimpleUDPClient", SimpleUDPClientMock):
msg_queue = Queue()
client = OSCManager(
config=main_config_native_params,
osc_message_in_queue=msg_queue,
)
client.start()
for message in messages:
sleep(0.01)
msg_queue.put(message)
client.shutdown()
assert msg_queue.empty()
assert client.osc_sender.client.messages == expected_outcome
@pytest.mark.parametrize(
"eye_data,expected_outcome",
[
(
[
OSCMessage(
type=OSCMessageType.EYE_INFO,
data=(
0,
EyeInfoMock(
x=0,
y=0,
blink=1,
pupil_dilation=1,
avg_velocity=0,
),
),
),
OSCMessage(
type=OSCMessageType.EYE_INFO,
data=(
1,
EyeInfoMock(
x=10,
y=5,
blink=0.5,
pupil_dilation=1,
avg_velocity=0,
),
),
),
],
[
("/tracking/eye/EyesClosedAmount", 0.0),
("/tracking/eye/EyesClosedAmount", 0.0),
# we expect 621 as left_y here because that is the default value
# before the first state update arrives with real data; by the time
# anyone is actually playing we will be many messages past this point,
# so the stale default is not noticeable in practice
("/tracking/eye/LeftRightVec", [0.0, 621.0, 1.0, 0.0, 0.0, 1.0]),
("/tracking/eye/EyesClosedAmount", 0.5),
("/tracking/eye/EyesClosedAmount", 0.5),
("/tracking/eye/LeftRightVec", [0.0, 5.0, 1.0, 0.0, 0.0, 1.0]),
],
),
# binary blink
(
[
OSCMessage(
type=OSCMessageType.EYE_INFO,
data=(
0,
EyeInfoMock(
x=0,
y=0,
blink=0,
pupil_dilation=1,
avg_velocity=0,
),
),
),
OSCMessage(
type=OSCMessageType.EYE_INFO,
data=(
1,
EyeInfoMock(
x=10,
y=5,
blink=0,
pupil_dilation=1,
avg_velocity=0,
),
),
),
],
[
("/tracking/eye/EyesClosedAmount", 1.0),
("/tracking/eye/EyesClosedAmount", 1.0),
("/tracking/eye/EyesClosedAmount", 1.0),
("/tracking/eye/EyesClosedAmount", 1.0),
("/tracking/eye/EyesClosedAmount", 1.0),
("/tracking/eye/EyesClosedAmount", 1.0),
("/tracking/eye/LeftRightVec", [0.0, 621.0, 1.0, 0.0, 0.0, 1.0]),
("/tracking/eye/EyesClosedAmount", 1.0),
("/tracking/eye/EyesClosedAmount", 1.0),
("/tracking/eye/EyesClosedAmount", 1.0),
("/tracking/eye/EyesClosedAmount", 1.0),
("/tracking/eye/EyesClosedAmount", 1.0),
("/tracking/eye/EyesClosedAmount", 1.0),
("/tracking/eye/LeftRightVec", [0.0, 5.0, 1.0, 0.0, 0.0, 1.0]),
],
),
],
)
def test_send_command_native_params_dual_eye(main_config_native_params, eye_data, expected_outcome):
main_config_native_params.eye_display_id = 2
with mock.patch("EyeTrackApp.osc.osc.udp_client.SimpleUDPClient", SimpleUDPClientMock):
msg_queue = Queue()
client = OSCManager(
config=main_config_native_params,
osc_message_in_queue=msg_queue,
)
client.start()
for message in eye_data:
sleep(0.01)
msg_queue.put(message)
client.shutdown()
assert msg_queue.empty()
assert client.osc_sender.client.messages == expected_outcome
@pytest.mark.parametrize(
"eye_data,expected_outcome",
[
(
[
OSCMessage(
type=OSCMessageType.EYE_INFO,
data=(
0,
EyeInfoMock(
x=0,
y=0,
blink=0,
pupil_dilation=1,
avg_velocity=0,
),
),
),
OSCMessage(
type=OSCMessageType.EYE_INFO,
data=(
1,
EyeInfoMock(
x=10,
y=5,
blink=0,
pupil_dilation=1,
avg_velocity=0,
),
),
),
],
[
("/tracking/eye/EyesClosedAmount", 1.0),
("/tracking/eye/EyesClosedAmount", 1.0),
("/tracking/eye/EyesClosedAmount", 1.0),
("/tracking/eye/EyesClosedAmount", 1.0),
("/tracking/eye/EyesClosedAmount", 1.0),
("/tracking/eye/EyesClosedAmount", 1.0),
("/tracking/eye/LeftRightVec", [0.0, 621.0, 1.0, 0.0, 0.0, 1.0]),
("/tracking/eye/EyesClosedAmount", 1.0),
("/tracking/eye/EyesClosedAmount", 1.0),
("/tracking/eye/EyesClosedAmount", 1.0),
("/tracking/eye/EyesClosedAmount", 1.0),
("/tracking/eye/EyesClosedAmount", 1.0),
("/tracking/eye/EyesClosedAmount", 1.0),
("/tracking/eye/EyesClosedAmount", 1.0),
("/tracking/eye/EyesClosedAmount", 1.0),
("/tracking/eye/EyesClosedAmount", 1.0),
("/tracking/eye/EyesClosedAmount", 1.0),
("/tracking/eye/EyesClosedAmount", 1.0),
("/tracking/eye/EyesClosedAmount", 1.0),
("/tracking/eye/LeftRightVec", [0.0, 5.0, 1.0, 0.0, 0.0, 1.0]),
],
),
],
)
def test_send_command_native_params_eye_outer_side_falloff(main_config_native_params, eye_data, expected_outcome):
main_config_native_params.eye_display_id = 2
main_config_native_params.settings.gui_outer_side_falloff = True
with mock.patch("EyeTrackApp.osc.osc.udp_client.SimpleUDPClient", SimpleUDPClientMock):
msg_queue = Queue()
client = OSCManager(
config=main_config_native_params,
osc_message_in_queue=msg_queue,
)
client.start()
for message in eye_data:
msg_queue.put(message)
sleep(1)
client.shutdown()
assert msg_queue.empty()
assert client.osc_sender.client.messages == expected_outcome

274
tests/test_osc_v1_params.py Normal file
View File

@ -0,0 +1,274 @@
from queue import Queue
from time import sleep
from unittest import mock
import pytest
from osc.osc import OSCManager, OSCMessage
from osc.OSCMessage import OSCMessageType
from tests import EyeInfoMock, SimpleUDPClientMock
@pytest.mark.parametrize(
"messages,expected_outcome",
[
(
[
OSCMessage(
type=OSCMessageType.EYE_INFO,
data=(
0,
EyeInfoMock(
x=0,
y=0,
blink=1,
pupil_dilation=0,
avg_velocity=0,
),
),
),
],
[
("/avatar/parameters/LeftEyeX", 0),
("/avatar/parameters/RightEyeX", 0),
("/avatar/parameters/EyesY", 0),
("/avatar/parameters/LeftEyeLidExpandedSqueeze", 1.0),
("/avatar/parameters/RightEyeLidExpandedSqueeze", 1.0),
],
),
(
[
OSCMessage(
type=OSCMessageType.EYE_INFO,
data=(
1,
EyeInfoMock(
x=0,
y=0,
blink=1,
pupil_dilation=0,
avg_velocity=0,
),
),
),
],
[
("/avatar/parameters/LeftEyeX", 0),
("/avatar/parameters/RightEyeX", 0),
("/avatar/parameters/EyesY", 0),
("/avatar/parameters/LeftEyeLidExpandedSqueeze", 1.0),
("/avatar/parameters/RightEyeLidExpandedSqueeze", 1.0),
],
),
],
)
def test_send_command_v1_params_single_eye(main_config_v1_params, messages, expected_outcome):
with mock.patch("EyeTrackApp.osc.osc.udp_client.SimpleUDPClient", SimpleUDPClientMock):
msg_queue = Queue()
client = OSCManager(
config=main_config_v1_params,
osc_message_in_queue=msg_queue,
)
client.start()
for message in messages:
sleep(0.01)
msg_queue.put(message)
client.shutdown()
assert msg_queue.empty()
assert client.osc_sender.client.messages == expected_outcome
@pytest.mark.parametrize(
"eye_data,expected_outcome",
[
(
[
OSCMessage(
type=OSCMessageType.EYE_INFO,
data=(
0,
EyeInfoMock(
x=0,
y=0,
blink=1,
pupil_dilation=1,
avg_velocity=0,
),
),
),
OSCMessage(
type=OSCMessageType.EYE_INFO,
data=(
1,
EyeInfoMock(
x=10,
y=5,
blink=0.5,
pupil_dilation=1,
avg_velocity=0,
),
),
),
],
[
("/avatar/parameters/RightEyeLidExpandedSqueeze", 1.0),
("/avatar/parameters/RightEyeX", 0),
("/avatar/parameters/RightEyeLidExpandedSqueeze", 1.0),
("/avatar/parameters/LeftEyeLidExpandedSqueeze", 0.5),
("/avatar/parameters/LeftEyeX", 0),
("/avatar/parameters/LeftEyeLidExpandedSqueeze", 0.5),
("/avatar/parameters/EyesY", 2.5),
],
),
# binary blink
(
[
OSCMessage(
type=OSCMessageType.EYE_INFO,
data=(
0,
EyeInfoMock(
x=0,
y=0,
blink=0,
pupil_dilation=1,
avg_velocity=0,
),
),
),
OSCMessage(
type=OSCMessageType.EYE_INFO,
data=(
1,
EyeInfoMock(
x=10,
y=5,
blink=0,
pupil_dilation=1,
avg_velocity=0,
),
),
),
],
[
("/avatar/parameters/RightEyeLidExpandedSqueeze", 0.0),
("/avatar/parameters/RightEyeLidExpandedSqueeze", 0.0),
("/avatar/parameters/RightEyeLidExpandedSqueeze", 0.0),
("/avatar/parameters/RightEyeLidExpandedSqueeze", 0.0),
("/avatar/parameters/RightEyeLidExpandedSqueeze", 0.0),
("/avatar/parameters/RightEyeLidExpandedSqueeze", 0.0),
("/avatar/parameters/RightEyeX", 0),
("/avatar/parameters/RightEyeLidExpandedSqueeze", 0.0),
("/avatar/parameters/LeftEyeLidExpandedSqueeze", 0.0),
("/avatar/parameters/LeftEyeLidExpandedSqueeze", 0.0),
("/avatar/parameters/LeftEyeLidExpandedSqueeze", 0.0),
("/avatar/parameters/LeftEyeLidExpandedSqueeze", 0.0),
("/avatar/parameters/LeftEyeLidExpandedSqueeze", 0.0),
("/avatar/parameters/LeftEyeLidExpandedSqueeze", 0.0),
("/avatar/parameters/LeftEyeX", 0),
("/avatar/parameters/LeftEyeLidExpandedSqueeze", 0.0),
("/avatar/parameters/EyesY", 2.5),
],
),
],
)
def test_send_command_v1_params_dual_eye(main_config_v1_params, eye_data, expected_outcome):
main_config_v1_params.eye_display_id = 2
with mock.patch("EyeTrackApp.osc.osc.udp_client.SimpleUDPClient", SimpleUDPClientMock):
msg_queue = Queue()
client = OSCManager(
config=main_config_v1_params,
osc_message_in_queue=msg_queue,
)
client.start()
for message in eye_data:
sleep(0.01)
msg_queue.put(message)
client.shutdown()
assert msg_queue.empty()
assert client.osc_sender.client.messages == expected_outcome
@pytest.mark.parametrize(
"eye_data,expected_outcome",
[
(
[
OSCMessage(
type=OSCMessageType.EYE_INFO,
data=(
0,
EyeInfoMock(
x=0,
y=0,
blink=0,
pupil_dilation=1,
avg_velocity=0,
),
),
),
OSCMessage(
type=OSCMessageType.EYE_INFO,
data=(
1,
EyeInfoMock(
x=10,
y=5,
blink=0,
pupil_dilation=1,
avg_velocity=0,
),
),
),
],
[
("/avatar/parameters/RightEyeLidExpandedSqueeze", 0.0),
("/avatar/parameters/RightEyeLidExpandedSqueeze", 0.0),
("/avatar/parameters/RightEyeLidExpandedSqueeze", 0.0),
("/avatar/parameters/RightEyeLidExpandedSqueeze", 0.0),
("/avatar/parameters/RightEyeLidExpandedSqueeze", 0.0),
("/avatar/parameters/RightEyeLidExpandedSqueeze", 0.0),
("/avatar/parameters/RightEyeX", 0),
("/avatar/parameters/RightEyeLidExpandedSqueeze", 0.0),
("/avatar/parameters/LeftEyeLidExpandedSqueeze", 0.0),
("/avatar/parameters/LeftEyeLidExpandedSqueeze", 0.0),
("/avatar/parameters/LeftEyeLidExpandedSqueeze", 0.0),
("/avatar/parameters/LeftEyeLidExpandedSqueeze", 0.0),
("/avatar/parameters/LeftEyeLidExpandedSqueeze", 0.0),
("/avatar/parameters/LeftEyeLidExpandedSqueeze", 0.0),
("/avatar/parameters/RightEyeLidExpandedSqueeze", 0.0),
("/avatar/parameters/LeftEyeLidExpandedSqueeze", 0.0),
("/avatar/parameters/LeftEyeX", 0),
("/avatar/parameters/LeftEyeLidExpandedSqueeze", 0.0),
("/avatar/parameters/EyesY", 2.5),
],
),
],
)
def test_send_command_v1_params_eye_outer_side_falloff(main_config_v1_params, eye_data, expected_outcome):
main_config_v1_params.eye_display_id = 2
main_config_v1_params.settings.gui_outer_side_falloff = True
with mock.patch("EyeTrackApp.osc.osc.udp_client.SimpleUDPClient", SimpleUDPClientMock):
msg_queue = Queue()
client = OSCManager(
config=main_config_v1_params,
osc_message_in_queue=msg_queue,
)
client.start()
for message in eye_data:
msg_queue.put(message)
sleep(1)
client.shutdown()
assert msg_queue.empty()
assert client.osc_sender.client.messages == expected_outcome

275
tests/test_osc_v2_params.py Normal file
View File

@ -0,0 +1,275 @@
from queue import Queue
from time import sleep
from unittest import mock
import pytest
from osc.osc import OSCManager, OSCMessage
from osc.OSCMessage import OSCMessageType
from tests import EyeInfoMock, SimpleUDPClientMock
@pytest.mark.parametrize(
"messages,expected_outcome",
[
(
[
OSCMessage(
type=OSCMessageType.EYE_INFO,
data=(
0,
EyeInfoMock(
x=0,
y=0,
blink=1,
pupil_dilation=0,
avg_velocity=0,
),
),
),
],
[
("/avatar/parameters/v2/EyeX", 0),
("/avatar/parameters/v2/EyeY", 0),
("/avatar/parameters/v2/EyeLid", 1.0),
("/avatar/parameters/v2/EyeLid", 1.0),
],
),
(
[
OSCMessage(
type=OSCMessageType.EYE_INFO,
data=(
1,
EyeInfoMock(
x=0,
y=0,
blink=1,
pupil_dilation=0,
avg_velocity=0,
),
),
),
],
[
("/avatar/parameters/v2/EyeX", 0),
("/avatar/parameters/v2/EyeY", 0),
("/avatar/parameters/v2/EyeLid", 1.0),
("/avatar/parameters/v2/EyeLid", 1.0),
],
),
],
)
def test_send_command_v2_params_single_eye(main_config_v2_params, messages, expected_outcome):
with mock.patch("EyeTrackApp.osc.osc.udp_client.SimpleUDPClient", SimpleUDPClientMock):
msg_queue = Queue()
client = OSCManager(
config=main_config_v2_params,
osc_message_in_queue=msg_queue,
)
client.start()
for message in messages:
sleep(0.01)
msg_queue.put(message)
client.shutdown()
assert msg_queue.empty()
assert client.osc_sender.client.messages == expected_outcome
@pytest.mark.parametrize(
"eye_data,expected_outcome",
[
(
[
OSCMessage(
type=OSCMessageType.EYE_INFO,
data=(
0,
EyeInfoMock(
x=0,
y=0,
blink=1,
pupil_dilation=1,
avg_velocity=0,
),
),
),
OSCMessage(
type=OSCMessageType.EYE_INFO,
data=(
1,
EyeInfoMock(
x=10,
y=5,
blink=0.5,
pupil_dilation=1,
avg_velocity=0,
),
),
),
],
[
("/avatar/parameters/v2/EyeLidRight", 1.0),
("/avatar/parameters/v2/EyeRightX", 0),
("/avatar/parameters/v2/EyeRightY", 0),
("/avatar/parameters/v2/EyeLidRight", 1.0),
("/avatar/parameters/v2/EyeLidLeft", 0.5),
("/avatar/parameters/v2/EyeLeftX", 10),
("/avatar/parameters/v2/EyeLeftY", 5),
("/avatar/parameters/v2/EyeLidLeft", 0.5),
],
),
# binary blink
(
[
OSCMessage(
type=OSCMessageType.EYE_INFO,
data=(
0,
EyeInfoMock(
x=0,
y=0,
blink=0,
pupil_dilation=1,
avg_velocity=0,
),
),
),
OSCMessage(
type=OSCMessageType.EYE_INFO,
data=(
1,
EyeInfoMock(
x=10,
y=5,
blink=0,
pupil_dilation=1,
avg_velocity=0,
),
),
),
],
[
("/avatar/parameters/v2/EyeLidRight", 0.0),
("/avatar/parameters/v2/EyeLidRight", 0.0),
("/avatar/parameters/v2/EyeLidRight", 0.0),
("/avatar/parameters/v2/EyeLidRight", 0.0),
("/avatar/parameters/v2/EyeLidRight", 0.0),
("/avatar/parameters/v2/EyeLidRight", 0.0),
("/avatar/parameters/v2/EyeRightX", 0),
("/avatar/parameters/v2/EyeRightY", 0),
("/avatar/parameters/v2/EyeLidRight", 0.0),
("/avatar/parameters/v2/EyeLidLeft", 0.0),
("/avatar/parameters/v2/EyeLidLeft", 0.0),
("/avatar/parameters/v2/EyeLidLeft", 0.0),
("/avatar/parameters/v2/EyeLidLeft", 0.0),
("/avatar/parameters/v2/EyeLidLeft", 0.0),
("/avatar/parameters/v2/EyeLidLeft", 0.0),
("/avatar/parameters/v2/EyeLeftX", 10),
("/avatar/parameters/v2/EyeLeftY", 5),
("/avatar/parameters/v2/EyeLidLeft", 0.0),
],
),
],
)
def test_send_command_v2_params_dual_eye(main_config_v2_params, eye_data, expected_outcome):
main_config_v2_params.eye_display_id = 2
with mock.patch("EyeTrackApp.osc.osc.udp_client.SimpleUDPClient", SimpleUDPClientMock):
msg_queue = Queue()
client = OSCManager(
config=main_config_v2_params,
osc_message_in_queue=msg_queue,
)
client.start()
for message in eye_data:
sleep(0.01)
msg_queue.put(message)
client.shutdown()
assert msg_queue.empty()
assert client.osc_sender.client.messages == expected_outcome
@pytest.mark.parametrize(
"eye_data,expected_outcome",
[
(
[
OSCMessage(
type=OSCMessageType.EYE_INFO,
data=(
0,
EyeInfoMock(
x=0,
y=0,
blink=0,
pupil_dilation=1,
avg_velocity=0,
),
),
),
OSCMessage(
type=OSCMessageType.EYE_INFO,
data=(
1,
EyeInfoMock(
x=10,
y=5,
blink=0,
pupil_dilation=1,
avg_velocity=0,
),
),
),
],
[
("/avatar/parameters/v2/EyeLidRight", 0.0),
("/avatar/parameters/v2/EyeLidRight", 0.0),
("/avatar/parameters/v2/EyeLidRight", 0.0),
("/avatar/parameters/v2/EyeLidRight", 0.0),
("/avatar/parameters/v2/EyeLidRight", 0.0),
("/avatar/parameters/v2/EyeLidRight", 0.0),
("/avatar/parameters/v2/EyeRightX", 0),
("/avatar/parameters/v2/EyeRightY", 0),
("/avatar/parameters/v2/EyeLidRight", 0.0),
("/avatar/parameters/v2/EyeLidLeft", 0.0),
("/avatar/parameters/v2/EyeLidLeft", 0.0),
("/avatar/parameters/v2/EyeLidLeft", 0.0),
("/avatar/parameters/v2/EyeLidLeft", 0.0),
("/avatar/parameters/v2/EyeLidLeft", 0.0),
("/avatar/parameters/v2/EyeLidLeft", 0.0),
("/avatar/parameters/v2/EyeLidRight", 0.0),
("/avatar/parameters/v2/EyeLidLeft", 0.0),
("/avatar/parameters/v2/EyeLeftX", 10),
("/avatar/parameters/v2/EyeLeftY", 5),
("/avatar/parameters/v2/EyeLidLeft", 0.0),
],
),
],
)
def test_send_command_v2_params_eye_outer_side_falloff(main_config_v2_params, eye_data, expected_outcome):
main_config_v2_params.eye_display_id = 2
main_config_v2_params.settings.gui_outer_side_falloff = True
with mock.patch("EyeTrackApp.osc.osc.udp_client.SimpleUDPClient", SimpleUDPClientMock):
msg_queue = Queue()
client = OSCManager(
config=main_config_v2_params,
osc_message_in_queue=msg_queue,
)
client.start()
for message in eye_data:
msg_queue.put(message)
sleep(1)
client.shutdown()
assert msg_queue.empty()
assert client.osc_sender.client.messages == expected_outcome