immich-server:
  container_name: immich_server
  image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION:-release}
  # Upstream CPU template kept for reference:
  # extends:
  #   file: hwaccel.transcoding.yml
  #   service: cpu # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding
  # Active hardware-accelerated transcoding (NVIDIA NVENC):
  extends:
    file: hwaccel.transcoding.yml
    service: nvenc # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding
  volumes:
    # Do not edit the next line. If you want to change the media storage location on your system, edit the value of UPLOAD_LOCATION in the .env file
    - ${UPLOAD_LOCATION}:/data
    - /etc/localtime:/etc/localtime:ro
  env_file:
    - .env
  ports:
    - '2283:2283'
  networks:
    - default
    - caddynet # external network shared with the reverse proxy
  depends_on:
    - redis
    - database
container_name: immich_machine_learning
# For hardware acceleration, add one of -[armnn, cuda, rocm, openvino, rknn] to the image tag.
# Example tag: ${IMMICH_VERSION:-release}-cuda
# Default (CPU-only) image, superseded by the -cuda tag below — kept as a comment
# because duplicate `image:` keys are invalid YAML (parsers silently keep the last):
# image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}
image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}-cuda
# CPU template kept for reference:
# extends:
#   file: hwaccel.ml.yml
#   service: cpu # set to one of [armnn, cuda, rocm, openvino, openvino-wsl, rknn] for accelerated inference - use the `-wsl` version for WSL2 where applicable
extends: # uncomment this section for hardware acceleration - see https://docs.immich.app/features/ml-hardware-acceleration
  file: hwaccel.ml.yml
  service: cuda # set to one of [armnn, cuda, rocm, openvino, openvino-wsl, rknn] for accelerated inference - use the `-wsl` version for WSL2 where applicable
volumes:
  - model-cache:/cache
env_file:
redis:
  container_name: immich_redis
  # Valkey pinned by digest. The older digest line (sha256:5463...8f63) was removed:
  # duplicate `image:` keys are invalid YAML and most parsers silently keep only the
  # last one, so keeping the newer digest preserves the effective behavior.
  image: docker.io/valkey/valkey:9@sha256:fb8d272e529ea567b9bf1302245796f21a2672b8368ca3fcb938ac334e613c8f
  healthcheck:
    test: redis-cli ping || exit 1
  restart: always
# leftover diff hunk header (was: @@ -69,8 +72,9 @@ services:) — safe to delete
|
|
|
|
|
# NOTE(review): the next line continues the database service's `volumes:` list,
# whose key lies outside this view — restore its deeper indentation when the
# surrounding context is re-indented.
- ${DB_DATA_LOCATION}:/var/lib/postgresql/data
shm_size: 128mb
restart: always
healthcheck:
  disable: false # keep the image's built-in healthcheck enabled
# Named volume for the machine-learning model cache.
volumes:
  model-cache:

# `caddynet` is created outside this compose project and shared with the proxy.
networks:
  caddynet:
    external: true