backend.workers.restart_4cat

Restart 4CAT and optionally upgrade it to the latest release

  1"""
  2Restart 4CAT and optionally upgrade it to the latest release
  3"""
  4import subprocess
  5import requests
  6import hashlib
  7import shlex
  8import json
  9import time
 10import uuid
 11import sys
 12
 13from pathlib import Path
 14
 15from backend.lib.worker import BasicWorker
 16from common.lib.exceptions import WorkerInterruptedException
 17from common.config_manager import config
 18
 19
 20class FourcatRestarterAndUpgrader(BasicWorker):
 21    """
 22    Restart 4CAT and optionally upgrade it to the latest release
 23
 24    Why implement this as a worker? Trying to have 4CAT restart itself leads
 25    to an interesting conundrum: it will not be able to report the outcome of
 26    the restart, because whatever bit of code is keeping track of that will be
 27    interrupted by restarting 4CAT.
 28
 29    Using a worker has the benefit of it restarting after 4CAT restarts, so it
 30    can then figure out that 4CAT was just restarted and report the outcome. It
 31    then uses a log file to keep track of the results. The log file can then be
 32    used by other parts of 4CAT to see if the restart was successful.
 33
 34    It does lead to another conundrum - what if due to some error, 4CAT never
 35    restarts? Then this worker will not be run again to report its own failure.
 36    There seem to be no clean ways around this, so anything watching the
 37    outcome of the worker probably needs to implement some timeout after which
 38    it is assumed that the restart/upgrade process failed catastrophically.
 39    """
 40    type = "restart-4cat"
 41    max_workers = 1
 42
 43    def work(self):
 44        """
 45        Restart 4CAT and optionally upgrade it to the latest release
 46        """
 47        # figure out if we're starting the restart or checking the result
 48        # after 4cat has been restarted
 49        is_resuming = self.job.data["attempts"] > 0
 50
 51        # prevent multiple restarts running at the same time which could blow
 52        # up really fast
 53        lock_file = Path(config.get("PATH_ROOT")).joinpath("config/restart.lock")
 54
 55        # this file has the log of the restart worker itself and is checked by
 56        # the frontend to see how far we are
 57        log_file_restart = Path(config.get("PATH_ROOT")).joinpath(config.get("PATH_LOGS")).joinpath("restart.log")
 58        log_stream_restart = log_file_restart.open("a")
 59
 60        if not is_resuming:
 61            log_stream_restart.write("Initiating 4CAT restart worker\n")
 62            self.log.info("New restart initiated.")
 63
 64            # this lock file will ensure that people don't start two
 65            # simultaneous upgrades or something
 66            with lock_file.open("w") as outfile:
 67                hasher = hashlib.blake2b()
 68                hasher.update(str(uuid.uuid4()).encode("utf-8"))
 69                outfile.write(hasher.hexdigest())
 70
 71            # trigger a restart and/or upgrade
 72            # returns a JSON with a 'status' key and a message, the message
 73            # being the process output
 74
 75            if self.job.data["remote_id"].startswith("upgrade"):
 76                command = sys.executable + " helper-scripts/migrate.py --repository %s --yes --restart --output %s" % \
 77                          (shlex.quote(config.get("4cat.github_url")), shlex.quote(str(log_file_restart)))
 78                if self.job.details and self.job.details.get("branch"):
 79                    # migrate to code in specific branch
 80                    command += f" --branch {shlex.quote(self.job.details['branch'])}"
 81                else:
 82                    # migrate to latest release
 83                    command += " --release"
 84
 85            else:
 86                command = sys.executable + " 4cat-daemon.py --no-version-check force-restart"
 87
 88            try:
 89                # flush any writes before the other process starts writing to
 90                # the stream
 91                self.log.info(f"Running command {command}")
 92                log_stream_restart.flush()
 93
 94                # the tricky part is that this command will interrupt the
 95                # daemon, i.e. this worker!
 96                # so we'll never get to actually send a response, if all goes
 97                # well. but the file descriptor that stdout is piped to remains
 98                # open, somehow, so we can use that to keep track of the output
 99                # stdin needs to be /dev/null here because else when 4CAT
100                # restarts and we re-attempt to make a daemon, it will fail
101                # when trying to close the stdin file descriptor of the
102                # subprocess (man, that was a fun bug to hunt down)
103                process = subprocess.Popen(shlex.split(command), cwd=str(config.get("PATH_ROOT")),
104                                           stdout=log_stream_restart, stderr=log_stream_restart,
105                                           stdin=subprocess.DEVNULL)
106
107                while not self.interrupted:
108                    # basically wait for either the process to quit or 4CAT to
109                    # be restarted (hopefully the latter)
110                    try:
111                        # now see if the process is finished - if not a
112                        # TimeoutExpired will be raised
113                        process.wait(1)
114                        break
115                    except subprocess.TimeoutExpired:
116                        pass
117
                if process.returncode is not None:
                    # if we reach this, 4CAT was never restarted, and so the job failed
                    log_stream_restart.write(
                        f"\nUnexpected outcome of restart call ({process.returncode})\n")

                    raise RuntimeError(f"restart process exited with code {process.returncode}")
                else:
                    # interrupted before the process could finish (as it should)
                    self.log.info("Restart triggered. Restarting 4CAT.")
                    raise WorkerInterruptedException()

            except (RuntimeError, subprocess.CalledProcessError) as e:
                log_stream_restart.write(str(e) + "\n")
                log_stream_restart.write(
                    "[Worker] Error while restarting 4CAT. The script returned a non-standard error code "
                    "(see above). You may need to restart 4CAT manually.\n")
                self.log.error(f"Error restarting 4CAT. See {log_stream_restart.name} for details.")
                lock_file.unlink()
                self.job.finish()

            finally:
                log_stream_restart.close()

        else:
            # 4CAT back-end was restarted - now check the results and make the
            # front-end restart or upgrade too
            self.log.info("Restart worker resumed after restarting 4CAT, restart successful.")
            log_stream_restart.write("4CAT restarted.\n")
            with Path(config.get("PATH_ROOT")).joinpath("config/.current-version").open() as infile:
                log_stream_restart.write(f"4CAT is now running version {infile.readline().strip()}.\n")

            # we're gonna use some specific Flask routes to trigger this, i.e.
            # we're interacting with the front-end through HTTP
            api_host = "https://" if config.get("flask.https") else "http://"
            if config.get("USING_DOCKER"):
                import os
                docker_exposed_port = os.environ['PUBLIC_PORT']
                api_host += f"host.docker.internal{':' + docker_exposed_port if docker_exposed_port != '80' else ''}"
            else:
                api_host += config.get("flask.server_name")

            if self.job.data["remote_id"].startswith("upgrade") and config.get("USING_DOCKER"):
                # when using Docker, the front-end needs to update separately
                log_stream_restart.write("Telling front-end Docker container to upgrade...\n")
                log_stream_restart.close()  # close, because front-end will be writing to it
                upgrade_ok = False
                upgrade_timeout = False
                try:
                    upgrade_url = api_host + "/admin/trigger-frontend-upgrade/"
                    with lock_file.open() as infile:
                        frontend_upgrade = requests.post(upgrade_url, data={"token": infile.read()}, timeout=(10 * 60))
                    upgrade_ok = frontend_upgrade.json()["status"] == "OK"
                except requests.Timeout:
                    # requests.Timeout is a subclass of RequestException, so
                    # it must be caught first for this flag to ever be set
                    upgrade_timeout = True
                except requests.RequestException:
                    pass

                log_stream_restart = log_file_restart.open("a")
                if not upgrade_ok:
                    if upgrade_timeout:
                        log_stream_restart.write("Upgrade timed out.\n")
                    log_stream_restart.write("Error upgrading front-end container. You may need to upgrade and restart "
                                             "containers manually.\n")
                    lock_file.unlink()
                    return self.job.finish()

            # restart front-end
            log_stream_restart.write("Asking front-end to restart itself...\n")
            log_stream_restart.flush()
            try:
                restart_url = api_host + "/admin/trigger-frontend-restart/"
                with lock_file.open() as infile:
                    response = requests.post(restart_url, data={"token": infile.read()}, timeout=5).json()

                if response.get("message"):
                    log_stream_restart.write(response.get("message") + "\n")
            except (json.JSONDecodeError, requests.RequestException):
                # this may happen because the server restarts and interrupts
                # the request
                pass

            # wait for front-end to come online after a restart
            time.sleep(3)  # give some time for the restart to trigger
            start_time = time.time()
            frontend_ok = False
            while time.time() < start_time + 60:
                try:
                    frontend = requests.get(api_host + "/", timeout=5)
                    if frontend.status_code > 401:
                        time.sleep(2)
                        continue
                    frontend_ok = True
                    break
                except requests.RequestException:
                    time.sleep(1)
                    continue

            # too bad
            if not frontend_ok:
                log_stream_restart.write("Timed out waiting for front-end to restart. You may need to restart it "
                                         "manually.\n")
                self.log.error("Front-end did not come back online after restart")
            else:
                log_stream_restart.write("Front-end is available. Restart complete.\n")
                self.log.info("Front-end is available. Restart complete.")

            log_stream_restart.close()
            lock_file.unlink()

            self.job.finish()
class FourcatRestarterAndUpgrader(backend.lib.worker.BasicWorker):

Restart 4CAT and optionally upgrade it to the latest release

Why implement this as a worker? Trying to have 4CAT restart itself leads to an interesting conundrum: it will not be able to report the outcome of the restart, because whatever bit of code is keeping track of that will be interrupted by restarting 4CAT.

Using a worker has the benefit of it restarting after 4CAT restarts, so it can then figure out that 4CAT was just restarted and report the outcome. It then uses a log file to keep track of the results. The log file can then be used by other parts of 4CAT to see if the restart was successful.

It does lead to another conundrum - what if due to some error, 4CAT never restarts? Then this worker will not be run again to report its own failure. There seem to be no clean ways around this, so anything watching the outcome of the worker probably needs to implement some timeout after which it is assumed that the restart/upgrade process failed catastrophically.
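For example, a front-end or script watching the restart could poll the restart log with a hard deadline. The sketch below is illustrative only, not 4CAT's actual monitoring code; the five-minute deadline is an arbitrary assumption, while the success marker is the "Restart complete." line that work() writes to the log when the front-end comes back online.

import time
from pathlib import Path

def wait_for_restart(log_file: Path, deadline: int = 300) -> bool:
    # Hypothetical watcher: poll the restart log until the worker
    # reports success, or give up after `deadline` seconds, since a
    # catastrophically failed restart will never report anything.
    start = time.time()
    while time.time() < start + deadline:
        if log_file.exists() and "Restart complete." in log_file.read_text():
            return True
        time.sleep(2)
    return False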

type = 'restart-4cat'
max_workers = 1
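
The worker is triggered by queuing a job of this type. The snippet below is a hypothetical illustration that assumes a job queue object with an add_job() method; the behaviour it exercises is grounded in work() itself: a remote_id starting with "upgrade" runs migrate.py, any other remote_id only runs force-restart, and a "branch" key in the job details upgrades to that branch instead of the latest release.

# restart only: remote_id does not start with "upgrade"
queue.add_job("restart-4cat", remote_id="restart")

# upgrade to the latest release, then restart
queue.add_job("restart-4cat", remote_id="upgrade")

# upgrade to a specific branch, passed via the job details
queue.add_job("restart-4cat", remote_id="upgrade", details={"branch": "master"})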
def work(self):

Restart 4CAT and optionally upgrade it to the latest release
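
Note that config/restart.lock doubles as a shared secret: work() fills it with a blake2b digest of a random UUID and later POSTs that same value as the "token" field to the front-end's trigger routes, so the front-end can check that a restart is actually in progress. A receiving route could verify it along these lines (a sketch of the idea, not 4CAT's actual front-end code):

from pathlib import Path

def token_is_valid(posted_token: str, lock_file: Path) -> bool:
    # the lock file only exists while a restart is in progress, and
    # holds the digest the worker wrote when the restart began
    return lock_file.exists() and posted_token == lock_file.read_text().strip()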