Added README and switched all openai stuff to support llama.cpp

Dave Niewinski 2024-01-23 01:21:06 +00:00
parent 2f3f4fbaeb
commit 6e5b275f7f
3 changed files with 41 additions and 4 deletions

README.md (new file, 36 lines added)
@@ -0,0 +1,36 @@
# GLaDOS
## LLM
git clone --depth=1 https://github.com/dusty-nv/jetson-containers
cd jetson-containers
./run.sh --workdir=/opt/llama.cpp/bin $(./autotag llama_cpp) /bin/bash -c './server --model $(huggingface-downloader TheBloke/openchat_3.5-GGUF/openchat_3.5.Q4_K_S.gguf) --n-gpu-layers 999 --threads $(nproc) -c 2048'
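Once the container is running, the llama.cpp server exposes an OpenAI-compatible HTTP API; the launch file later in this commit points `base_url` at `http://localhost:8080/v1`, so that is the endpoint assumed here. A minimal sketch (not part of the original README) for checking that the server answers chat requests, assuming Python 3 with the `requests` package installed:

```python
# Quick check that the llama.cpp server's OpenAI-compatible API is up.
# The base URL mirrors the base_url value set in the launch file in this commit.
import requests

BASE_URL = "http://localhost:8080/v1"

resp = requests.post(
    f"{BASE_URL}/chat/completions",
    json={
        "model": "openchat_3.5.Q4_K_S.gguf",  # llama.cpp serves its loaded model regardless
        "messages": [{"role": "user", "content": "Say hello in one short sentence."}],
        "max_tokens": 32,
    },
    timeout=120,
)
resp.raise_for_status()
print(resp.json()["choices"][0]["message"]["content"])
```

If this prints a sensible reply, the ROS side only needs the matching `base_url` shown further down.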
## RIVA
See the Riva quick start guide: [https://docs.nvidia.com/deeplearning/riva/user-guide/docs/quick-start-guide.html](https://docs.nvidia.com/deeplearning/riva/user-guide/docs/quick-start-guide.html)
ngc registry resource download-version nvidia/riva/riva_quickstart_arm64:2.13.1
cd riva_quickstart_arm64_v2.13.1
bash riva_init.sh
See the NeMo information for what to do here, then:
bash riva_start.sh
## ASR
Check the microphone's input device number (see the sketch below), then run:
python3 transcribe_mic.py --input-device=4
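To find the number to pass to `--input-device`, one option is to list the capture devices visible to Python. This is only a sketch assuming the `sounddevice` package is installed (a common dependency of the Riva Python clients); `transcribe_mic.py` itself is not shown in this commit.

```python
# List input-capable audio devices and their indices; use the microphone's
# index as the --input-device argument for transcribe_mic.py.
import sounddevice as sd  # assumed available: pip install sounddevice

for idx, dev in enumerate(sd.query_devices()):
    if dev["max_input_channels"] > 0:
        print(f"{idx}: {dev['name']} ({dev['max_input_channels']} input channels)")
```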
## TTS
Pair a Bluetooth speaker
See [https://developer.nvidia.com/embedded/learn/tutorials/connecting-bluetooth-audio](https://developer.nvidia.com/embedded/learn/tutorials/connecting-bluetooth-audio)
python3 glados_talk.py --voice=GLaDOS --play-audio
## Everything Else
roslaunch glados_bringup bringup.launch


@@ -1,7 +1,8 @@
 <launch>
   <include file="$(find openai_ros)/launch/openai.launch">
-    <arg name="model" value="gpt-3.5-turbo" />
-    <arg name="max_tokens" default="1024" />
-    <arg name="max_history_length" default="12" />
+    <arg name="model" value="/data/models/huggingface/models--TheBloke--openchat_3.5-GGUF/snapshots/9145d8078ca54b163b4b7f7d45cf525b63ee77f8/openchat_3.5.Q4_K_S.gguf" />
+    <arg name="max_tokens" value="1024" />
+    <arg name="max_history_length" value="12" />
+    <arg name="base_url" value="http://localhost:8080/v1" />
   </include>
 </launch>
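The launch change above swaps the hosted `gpt-3.5-turbo` model for the local GGUF file and adds a `base_url` pointing at the llama.cpp server, so the OpenAI-style client behind `openai_ros` talks to the local endpoint instead of api.openai.com. The client code inside `openai_ros` is not part of this diff; the sketch below only illustrates the general pattern of redirecting an OpenAI-compatible client via a base URL, using the `openai` Python package (1.x API, an assumption here):

```python
# Illustration of pointing an OpenAI-compatible client at the local llama.cpp
# server; the model path and base_url mirror the launch arguments above.
from openai import OpenAI

client = OpenAI(
    base_url="http://localhost:8080/v1",
    api_key="not-needed",  # llama.cpp's server does not require a key by default
)

reply = client.chat.completions.create(
    model="/data/models/huggingface/models--TheBloke--openchat_3.5-GGUF/"
          "snapshots/9145d8078ca54b163b4b7f7d45cf525b63ee77f8/openchat_3.5.Q4_K_S.gguf",
    messages=[{"role": "user", "content": "Hello there."}],
    max_tokens=64,
)
print(reply.choices[0].message.content)
```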

@@ -1 +1 @@
-Subproject commit b734ec83d1bdb7eedd1c728deadff7c9b1a7b7f8
+Subproject commit ed9a8cd5faabc7f82490f44aba20468ef50c04b8