diff --git a/backend/workers/ollama_manager.py b/backend/workers/ollama_manager.py index 64808f52d..3c6950b59 100644 --- a/backend/workers/ollama_manager.py +++ b/backend/workers/ollama_manager.py @@ -104,6 +104,14 @@ def refresh_models(self): self.config.set("llm.available_models", available_models) self.log.debug(f"OllamaManager: refreshed model list ({len(available_models)} models)") + # Reconcile enabled models: remove any that are no longer available + enabled_models = self.config.get("llm.enabled_models", []) + reconciled = [m for m in enabled_models if m in available_models] + if len(reconciled) != len(enabled_models): + removed = set(enabled_models) - set(reconciled) + self.log.info(f"OllamaManager: removed stale enabled model(s): {', '.join(sorted(removed))}") + self.config.set("llm.enabled_models", reconciled) + except requests.RequestException as e: self.log.warning(f"OllamaManager: could not refresh model list - request error: {e}") diff --git a/backend/workers/refresh_items.py b/backend/workers/refresh_items.py index 96a7da6b0..7ab11645d 100644 --- a/backend/workers/refresh_items.py +++ b/backend/workers/refresh_items.py @@ -13,15 +13,14 @@ class ItemUpdater(BasicWorker): type = "refresh-items" max_workers = 1 - @classmethod - def ensure_job(cls, config=None): - """ - Ensure that the refresher is always running - - :return: Job parameters for the worker - """ - return {"remote_id": "refresh-items", "interval": 60} + # ensure_job is intentionally disabled: this worker currently does nothing + # and would only create unnecessary job queue churn. Re-enable when work() + # has actual tasks to perform. + # @classmethod + # def ensure_job(cls, config=None): + # return {"remote_id": "refresh-items", "interval": 60} def work(self): + # Placeholder – no tasks implemented yet. 
self.job.finish() \ No newline at end of file diff --git a/docker/README.md b/docker/README.md index de5dd318b..31843b2ce 100644 --- a/docker/README.md +++ b/docker/README.md @@ -63,8 +63,17 @@ the host via the exposed port). ### Configuring 4CAT to use Ollama +#### Automatic configuration (fresh Docker install with sidecar) + +When you start 4CAT for the first time using the Ollama override file, the +`docker_setup.py` initialisation script automatically detects the `ollama` +sidecar and sets **LLM Provider Type**, **LLM Server URL**, and **LLM Access** +for you. You can skip to step 2 below. + +#### Manual configuration (or to verify/change settings) + 1. Log in as admin and open **Control Panel → Settings**. -2. Set the following LLM fields: +2. Confirm or set the following LLM fields: | Setting | Value | |---|---| @@ -102,3 +111,7 @@ point 4CAT directly at that server: - **On the same host**: use `http://host.docker.internal:11434` as the LLM Server URL. - **Remote server**: use the server's reachable URL and configure any required API key in the *LLM Server API Key* and *LLM Server Authentication Type* settings. + +In both cases, configure the LLM settings manually via **Control Panel → Settings** +(see *Manual configuration* above), using the appropriate server URL instead of +`http://ollama:11434`.