[CI] test_pdffile hangs test pipeline
The PDFFile tests can fail at teardown, probably as a follow-up to an exception raised in the test itself. When this happens, the backgrounded Chromium subprocess is never terminated, and the test suite hangs indefinitely (a possible mitigation is sketched below the traceback).
――――――――――――――――――――――――――――――――――――――――――――― ERROR at teardown of PDFFIleTest.test_delete_expired_files ――――――――――――――――――――――――――――――――――――――――――――――
request = <SubRequest 'celery_worker' for <TestCaseFunction test_delete_expired_files>>, celery_app = <Celery celery.tests at 0x7f49f8250d60>
celery_includes = (), celery_worker_pool = 'solo', celery_worker_parameters = {}
    @pytest.fixture()
    def celery_worker(request,
                      celery_app,
                      celery_includes,
                      celery_worker_pool,
                      celery_worker_parameters):
        # type: (Any, Celery, Sequence[str], str, Any) -> WorkController
        """Fixture: Start worker in a thread, stop it when the test returns."""
        from .testing import worker
        if not NO_WORKER:
            for module in celery_includes:
                celery_app.loader.import_task_module(module)
            with worker.start_worker(celery_app,
                                     pool=celery_worker_pool,
                                     **celery_worker_parameters) as w:
>               yield w
.tox/globalenv/lib/python3.9/site-packages/celery/contrib/pytest.py:210:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/lib/python3.9/contextlib.py:126: in __exit__
next(self.gen)
.tox/globalenv/lib/python3.9/site-packages/celery/contrib/testing/worker.py:88: in start_worker
yield worker
/usr/lib/python3.9/contextlib.py:126: in __exit__
next(self.gen)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
app = <Celery celery.tests at 0x7f49f8250d60>, concurrency = 1, pool = 'solo', loglevel = 'error', logfile = None
WorkController = <class 'celery.contrib.testing.worker.TestWorkController'>, perform_ping_check = True, shutdown_timeout = 10.0, kwargs = {}
conn = <Connection: memory://localhost// at 0x7f49f88ca6d0>, worker = <Worker: gen916601@frifot (running)>
t = <Thread(Thread-2, started daemon 139955659757120)>
state = <module 'celery.worker.state' from '/home/nik/AlekSIS/AlekSIS/apps/official/AlekSIS-Core/.tox/globalenv/lib/python3.9/site-packages/celery/worker/state.py'>
    @contextmanager
    def _start_worker_thread(app,
                             concurrency=1,
                             pool='solo',
                             loglevel=WORKER_LOGLEVEL,
                             logfile=None,
                             WorkController=TestWorkController,
                             perform_ping_check=True,
                             shutdown_timeout=10.0,
                             **kwargs):
        # type: (Celery, int, str, Union[str, int], str, Any, **Any) -> Iterable
        """Start Celery worker in a thread.
        Yields:
            celery.worker.Worker: worker instance.
        """
        setup_app_for_worker(app, loglevel, logfile)
        if perform_ping_check:
            assert 'celery.ping' in app.tasks
        # Make sure we can connect to the broker
        with app.connection(hostname=os.environ.get('TEST_BROKER')) as conn:
            conn.default_channel.queue_declare
        worker = WorkController(
            app=app,
            concurrency=concurrency,
            hostname=anon_nodename(),
            pool=pool,
            loglevel=loglevel,
            logfile=logfile,
            # not allowed to override TestWorkController.on_consumer_ready
            ready_callback=None,
            without_heartbeat=kwargs.pop("without_heartbeat", True),
            without_mingle=True,
            without_gossip=True,
            **kwargs)
        t = threading.Thread(target=worker.start, daemon=True)
        t.start()
        worker.ensure_started()
        _set_task_join_will_block(False)
        yield worker
        from celery.worker import state
        state.should_terminate = 0
        t.join(shutdown_timeout)
        if t.is_alive():
>           raise RuntimeError(
                "Worker thread failed to exit within the allocated timeout. "
                "Consider raising `shutdown_timeout` if your tasks take longer "
                "to execute."
            )
E       RuntimeError: Worker thread failed to exit within the allocated timeout. Consider raising `shutdown_timeout` if your tasks take longer to execute.
.tox/globalenv/lib/python3.9/site-packages/celery/contrib/testing/worker.py:140: RuntimeError
---------------------------------------------------------------- Captured stderr setup ----------------------------------------------------------------
WARNING 2022-01-03 16:34:41,622 connection: No hostname was supplied. Reverting to default 'localhost'
WARNING 2022-01-03 16:34:41,629 connection: No hostname was supplied. Reverting to default 'localhost'
---------------------------------------------------------------- Captured stderr call -----------------------------------------------------------------
ERROR 2022-01-03 16:37:13,470 consumer: Received unregistered task of type 'haystack_signal_handler'.
The message has been ignored and discarded.
Did you remember to import the module containing this task?
Or maybe you're using relative imports?
Please see
http://docs.celeryq.org/en/latest/internals/protocol.html
for more information.
The full contents of the message body was:
b'[["update", "core.person.26"], {}, {"callbacks": null, "errbacks": null, "chain": null, "chord": null}]' (103b)
Traceback (most recent call last):
  File "/home/nik/AlekSIS/AlekSIS/apps/official/AlekSIS-Core/.tox/globalenv/lib/python3.9/site-packages/celery/worker/consumer/consumer.py", line 581, in on_task_received
    strategy = strategies[type_]
KeyError: 'haystack_signal_handler'
------------------------------------------------------------------ Captured log call ------------------------------------------------------------------
ERROR celery.worker.consumer.consumer:consumer.py:514 Received unregistered task of type 'haystack_signal_handler'.
The message has been ignored and discarded.
Did you remember to import the module containing this task?
Or maybe you're using relative imports?
Please see
http://docs.celeryq.org/en/latest/internals/protocol.html
for more information.
The full contents of the message body was:
b'[["update", "core.person.26"], {}, {"callbacks": null, "errbacks": null, "chain": null, "chord": null}]' (103b)
Traceback (most recent call last):
  File "/home/nik/AlekSIS/AlekSIS/apps/official/AlekSIS-Core/.tox/globalenv/lib/python3.9/site-packages/celery/worker/consumer/consumer.py", line 581, in on_task_received
    strategy = strategies[type_]
KeyError: 'haystack_signal_handler'
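
A possible mitigation, until the underlying test failure is fixed, would be a teardown that reaps any headless browser left behind by a failed test. The sketch below is only an illustration, not the actual AlekSIS code: it assumes psutil is available in the test environment and that the PDF backend spawns Chromium as a child process of the test process; the fixture name and the process-name match are made up.

import psutil
import pytest


@pytest.fixture(autouse=True)
def _reap_leftover_chromium():
    """Terminate any Chromium child processes a test leaves behind (sketch)."""
    yield  # run the test first

    for child in psutil.Process().children(recursive=True):
        try:
            if "chrom" not in child.name().lower():
                continue
            child.terminate()  # ask politely first
            try:
                child.wait(timeout=5)
            except psutil.TimeoutExpired:
                child.kill()  # force-kill if SIGTERM is ignored
        except psutil.NoSuchProcess:
            pass  # already gone, nothing to do

With something like this in place, a test that blows up mid-PDF-generation would no longer leave Chromium running, so the Celery worker thread should be able to shut down within its timeout instead of stalling the whole pipeline.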