Streaming local LLM server and client.
- Streaming LLM server and client
- Session management and persistence
- Automatic title summarization
- Syntax highlighting and LaTeX formatting
- Image generation capabilities
sudo apt-get install sqlite3 libsqlite3-dev
# These modules must be installed first, before the rest of the requirements
pip install packaging torch
# Install the rest of the dependencies
pip install -r requirements.txt
Run any text-generation model from Hugging Face.
docker build . --tag="turbo-genius"
docker run --gpus all -e HUGGINGFACE_TOKEN=<token> -d -p 8000:8000 turbo-genius
python server.py --model <model> --image_generation --image_cpu_offload
python cli.py --host <host> --port <port>
python client.py --host <host> --port <port>