Add environment configuration and Docker Compose setup for Open WebUI and LiteLLM

This commit is contained in:
pazpi 2025-05-24 18:19:12 +02:00
parent 22697d1913
commit 56ea386f83
3 changed files with 161 additions and 11 deletions

View file

@@ -0,0 +1,14 @@
# OPEN WEBUI
# OIDC client credentials for the Authentik provider
# (consumed by the OPENID_* / OAUTH_* settings in docker-compose).
OAUTH_CLIENT_ID=
OAUTH_CLIENT_SECRET=
# Azure Speech service keys for speech-to-text / text-to-speech
# (the matching regions are set in docker-compose).
AUDIO_STT_AZURE_API_KEY=
AUDIO_TTS_API_KEY=
# LITELLM
LITELLM_MODE="PRODUCTION"
# Admin key for authenticating against the LiteLLM proxy API (template shows the "sk-" prefix).
LITELLM_MASTER_KEY=sk-<master_key>
# Salt key for LiteLLM — presumably used to encrypt stored credentials; TODO confirm against LiteLLM docs.
LITELLM_SALT_KEY=sk-<salt_key>
# Azure OpenAI resource endpoint + key (read via os.environ/AZURE_API_*_OPENAI in litellm-config.yaml).
AZURE_API_BASE_OPENAI=https://<resource-name>.openai.azure.com/
AZURE_API_KEY_OPENAI=
# Azure AI (serverless models) endpoint + key (read via os.environ/AZURE_API_*_AI in litellm-config.yaml).
AZURE_API_BASE_AI=https://<resource-name>.services.ai.azure.com/models
AZURE_API_KEY_AI=

View file

@@ -1,27 +1,85 @@
# Docker Compose stack: Open WebUI frontend -> LiteLLM proxy -> Postgres.
# (Reconstructed from the right-hand column of the garbled side-by-side diff.)
services:
  openwebui:
    image: ghcr.io/open-webui/open-webui:main
    container_name: open-webui
    ports:
      - "4080:8080"
    volumes:
      - open-webui:/app/backend/data
    env_file:
      - .env  # supplies OAUTH_CLIENT_ID/SECRET and the audio API keys
    environment:
      WEBUI_URL: "https://ai.${DOMAIN}"
      ENABLE_OPENAI_API: true
      # Route all OpenAI-compatible traffic through the LiteLLM proxy service below.
      OPENAI_API_BASE_URL: "http://litellm:4000"
      ENABLE_OLLAMA_API: false
      ENABLE_IMAGE_GENERATION: true
      IMAGE_SIZE: "1024x1024"
      IMAGE_GENERATION_MODEL: "Dall-e 3"  # must match a model_name in litellm-config.yaml
      ENABLE_WEB_SEARCH: true
      ENABLE_RAG_WEB_SEARCH: true  # was "True" — use canonical lowercase booleans
      RAG_WEB_SEARCH_ENGINE: "searxng"
      RAG_WEB_SEARCH_RESULT_COUNT: 3
      RAG_WEB_SEARCH_CONCURRENT_REQUESTS: 10
      SEARXNG_QUERY_URL: "https://search.${DOMAIN}/search?q=<query>"
      WEB_SEARCH_ENGINE: "searxng"
      OAUTH_PROVIDER_NAME: "authentik"
      OPENID_PROVIDER_URL: "https://auth.${DOMAIN}/application/o/openwebui/.well-known/openid-configuration"
      OPENID_REDIRECT_URI: "https://ai.${DOMAIN}/oauth/oidc/callback"
      ENABLE_OAUTH_SIGNUP: true
      ENABLE_LOGIN_FORM: false
      AUDIO_STT_ENGINE: "azure"
      AUDIO_STT_AZURE_REGION: "swedencentral"
      AUDIO_TTS_ENGINE: "azure"
      AUDIO_TTS_AZURE_SPEECH_REGION: "swedencentral"
    labels:
      - "com.centurylinklabs.watchtower.enable=true"

  litellm:
    container_name: litellm
    image: ghcr.io/berriai/litellm:main-stable
    volumes:
      - ./litellm-config.yaml:/app/config.yaml
    command:
      - "--config=/app/config.yaml"
    ports:
      - "4000:4000"  # host:container — change the host port if necessary
    environment:
      # NOTE(review): credentials are hard-coded here and duplicated in the db
      # service; prefer a POSTGRES_PASSWORD entry in .env interpolated in both places.
      DATABASE_URL: "postgresql://llmproxy:Develop9090@db:5432/litellm"
    env_file:
      - .env  # supplies LITELLM_MASTER_KEY/SALT_KEY and the Azure endpoints/keys
    depends_on:
      - db  # ensure Postgres starts before the proxy
    healthcheck:
      # CMD-SHELL is required here: with the exec-form "CMD" the trailing
      # "|| exit 1" is not interpreted by a shell — it would be passed to curl
      # as literal text appended to the URL, so the fallback never runs.
      test: ["CMD-SHELL", "curl -f http://localhost:4000/health/liveliness || exit 1"]
      interval: 30s  # perform health check every 30 seconds
      timeout: 10s  # health check command times out after 10 seconds
      retries: 3  # retry up to 3 times if health check fails
      start_period: 40s  # grace period after start before failures count
    labels:
      - "com.centurylinklabs.watchtower.enable=true"

  db:
    image: postgres:16
    restart: always
    container_name: litellm_db
    environment:
      POSTGRES_DB: litellm
      POSTGRES_USER: llmproxy
      POSTGRES_PASSWORD: Develop9090  # NOTE(review): move to .env; must match DATABASE_URL above
    volumes:
      - postgres_data:/var/lib/postgresql/data  # persist Postgres data across container restarts
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -d litellm -U llmproxy"]
      interval: 1s
      timeout: 5s
      retries: 10

volumes:
  open-webui:
  postgres_data:
    name: litellm_postgres_data  # named volume for Postgres data persistence

View file

@@ -0,0 +1,78 @@
# LiteLLM proxy configuration: general/proxy settings, Azure credentials
# (resolved from environment variables), and the exposed model list.
environment_variables: {}
router_settings: {}
general_settings:
  # Batch write spend updates every 60s.
  # (A duplicate "proxy_batch_write_at" key was removed — YAML duplicate keys
  # are invalid per the 1.2 spec and silently last-wins in most parsers.)
  proxy_batch_write_at: 60
  database_connection_pool_limit: 10  # ≈ max DB connections / number of litellm proxy instances (around 10-20 is a good number)
  # OPTIONAL best practices
  disable_spend_logs: true  # turn off writing each transaction to the DB; recommended if you don't need Usage in the LiteLLM UI and track metrics via Prometheus
  disable_error_logs: true  # turn off writing LLM exceptions to the DB
  allow_requests_on_db_unavailable: true  # only use when LiteLLM runs inside your VPC: keep processing requests even if the DB is unavailable
  store_model_in_db: true
litellm_settings:
  request_timeout: 600  # raise a Timeout error if a call takes longer than 600 seconds (default is 6000 seconds if not set)
  set_verbose: false  # switch off debug logging; ensure your logs do not have any debugging on
  json_logs: true  # emit logs in JSON format
credential_list:
  - credential_name: azure_openai_credential
    credential_values:
      api_base: os.environ/AZURE_API_BASE_OPENAI
      api_key: os.environ/AZURE_API_KEY_OPENAI
    credential_info:
      description: Azure OpenAI credentials
  - credential_name: azure_ai_credential
    credential_values:
      api_base: os.environ/AZURE_API_BASE_AI
      api_key: os.environ/AZURE_API_KEY_AI
    credential_info:
      description: Azure AI credentials
model_list:
  - model_name: text-embedding-3-large
    litellm_params:
      model: azure/text-embedding-3-large
      litellm_credential_name: azure_openai_credential
  - model_name: text-embedding-3-small
    litellm_params:
      model: azure/text-embedding-3-small
      litellm_credential_name: azure_openai_credential
  - model_name: GPT-4.1
    litellm_params:
      model: azure/gpt-4.1
      litellm_credential_name: azure_openai_credential
  - model_name: GPT-4.1 Mini
    litellm_params:
      model: azure/gpt-4.1-mini
      litellm_credential_name: azure_openai_credential
  - model_name: GPT-3.5 Turbo
    litellm_params:
      model: azure/gpt-3.5-turbo
      litellm_credential_name: azure_openai_credential
  - model_name: GPT-4o
    litellm_params:
      model: azure/gpt-4o
      litellm_credential_name: azure_openai_credential
  - model_name: GPT-4o Mini
    litellm_params:
      model: azure/gpt-4o-mini
      litellm_credential_name: azure_openai_credential
  - model_name: o3 Mini
    litellm_params:
      model: azure/o3-mini
      api_version: 2024-12-01-preview  # NOTE(review): consider quoting — unquoted it may parse as a date
      litellm_credential_name: azure_openai_credential
  - model_name: GPT-4o Audio
    litellm_params:
      model: azure/azure-openai-4o-audio
      litellm_credential_name: azure_openai_credential
  - model_name: Dall-e 3
    litellm_params:
      model: azure/dall-e-3
      litellm_credential_name: azure_openai_credential
  - model_name: DeepSeek-R1
    litellm_params:
      model: azure_ai/DeepSeek-R1
      litellm_credential_name: azure_ai_credential