
backend.workers.refresh_items

Refresh items

 1"""
 2Refresh items
 3"""
 4import json
 5
 6import requests
 7
 8from backend.lib.worker import BasicWorker
 9
10class ItemUpdater(BasicWorker):
11    """
12    Refresh 4CAT items
13
14    Refreshes settings that are dependent on external factors
15    """
16    type = "refresh-items"
17    max_workers = 1
18
19    @classmethod
20    def ensure_job(cls, config=None):
21        """
22        Ensure that the refresher is always running
23
24        This is used to ensure that the refresher is always running, and if it is
25        not, it will be started by the WorkerManager.
26
27        :return:  Job parameters for the worker
28        """
29        return {"remote_id": "refresh-items", "interval": 60}
30
31    def work(self):
32        # Refresh items
33        self.refresh_settings()
34
35        self.job.finish()
36
37    def refresh_settings(self):
38        """
39        Refresh settings
40        """
41        # LLM server settings
42        llm_provider = self.config.get("llm.provider_type", "none").lower()
43        llm_server = self.config.get("llm.server", "")
44
45        # For now we only support the Ollama API
46        if llm_provider == "ollama" and llm_server:
47            headers = {"Content-Type": "application/json"}
48            llm_api_key = self.config.get("llm.api_key", "")
49            llm_auth_type = self.config.get("llm.auth_type", "")
50            if llm_api_key and llm_auth_type:
51                headers[llm_auth_type] = llm_api_key
52
53            available_models = {}
54            try:
55                response = requests.get(f"{llm_server}/api/tags", headers=headers, timeout=10)
56                if response.status_code == 200:
57                    settings = response.json()
58                    for model in settings.get("models", []):
59                        model = model["name"]
60                        try:
61                            model_metadata = requests.post(f"{llm_server}/api/show", headers=headers, json={"model": model}, timeout=10).json()
62                            available_models[model] = {
63                                "name": f"{model_metadata['model_info']['general.basename']} ({model_metadata['details']['parameter_size']} parameters)",
64                                "model_card": f"https://ollama.com/library/{model}",
65                                "provider": "local"
66                            }
67                            
68                        except (requests.RequestException, json.JSONDecodeError, KeyError) as e:
69                            self.log.debug(f"Could not get metadata for model {model} from Ollama - skipping (error: {e})")
70
71                    self.config.set("llm.available_models", available_models)
72                    self.log.debug("Refreshed LLM server settings cache")
73                else:
74                    self.log.warning(f"Could not refresh LLM server settings cache - server returned status code {response.status_code}")
75
76            except requests.RequestException as e:
77                self.log.warning(f"Could not refresh LLM server settings cache - request error: {str(e)}")
78            
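The refresh only does anything when an LLM provider is configured. A minimal sketch of the configuration keys the worker reads follows; the values are illustrative placeholders, not taken from the source (the server address follows Ollama's usual default, and the header name is an assumption).

# Configuration keys read by refresh_settings(); values shown are placeholders.
config = {
    "llm.provider_type": "ollama",            # only the Ollama API is supported for now
    "llm.server": "http://localhost:11434",   # assumed default Ollama address; adjust for your deployment
    "llm.api_key": "",                        # optional; sent as the value of the header named below
    "llm.auth_type": "",                      # header name for the key, e.g. "Authorization" (assumption)
}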
class ItemUpdater(backend.lib.worker.BasicWorker):

Refresh 4CAT items

Refreshes settings that are dependent on external factors

type = 'refresh-items'
max_workers = 1
@classmethod
def ensure_job(cls, config=None):

Ensure that the refresher is always running

Provides the job parameters the WorkerManager needs: if the refresher job is not currently running, it is started with these parameters.

Returns

Job parameters for the worker
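For illustration, this is what the classmethod returns, taken directly from the method body; the interval of 60 means the refresher is scheduled to run roughly once a minute.

# ensure_job() supplies the parameters the WorkerManager needs to (re)create the job
params = ItemUpdater.ensure_job()
assert params == {"remote_id": "refresh-items", "interval": 60}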

def work(self):

This is where the actual work happens

Whatever the worker is supposed to do should happen in (or be initiated from) this method. By default it does nothing; descending classes should implement it.
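A minimal sketch of a descending class, assuming the same BasicWorker interface that ItemUpdater uses (a type identifier, a max_workers limit, a work() method, and self.job.finish() to mark the job as done); the worker type shown is hypothetical.

from backend.lib.worker import BasicWorker

class ExampleWorker(BasicWorker):
    type = "example-worker"  # hypothetical worker type, not part of 4CAT
    max_workers = 1

    def work(self):
        # whatever this worker is supposed to do happens (or is initiated) here
        self.job.finish()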

def refresh_settings(self):

Refresh settings that depend on external factors. For an Ollama LLM provider, this queries the configured server for its available models and caches the result under llm.available_models.
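For reference, a sketch of what ends up cached under llm.available_models after a successful refresh, following the structure built in refresh_settings(); the model tag, base name, and parameter size are illustrative, not real query results.

# Illustrative shape of the cached value; one entry per model reported by Ollama.
{
    "llama3:8b": {                                          # hypothetical model tag
        "name": "llama-3 (8.0B parameters)",                # general.basename + parameter_size
        "model_card": "https://ollama.com/library/llama3:8b",
        "provider": "local"
    }
}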