From 085d5ba822fe86e7e3f5d17169f7198d87d8eef6 Mon Sep 17 00:00:00 2001
From: Florian da Costa
Date: Wed, 10 Feb 2021 13:34:45 +0100
Subject: [PATCH] Vacuum done jobs in batch

A huge number of jobs to delete may take a long time, and the cron may
crash, because of a timeout for instance. This issue would make the cron
run permanently and fail permanently...

Deleting in batches avoids this issue: if the cron fails, it will still
have deleted some of the job history.
---
 queue_job/models/queue_job.py | 18 +++++++++++++-----
 1 file changed, 13 insertions(+), 5 deletions(-)

diff --git a/queue_job/models/queue_job.py b/queue_job/models/queue_job.py
index 9376fab07d..bdde5e0f7b 100644
--- a/queue_job/models/queue_job.py
+++ b/queue_job/models/queue_job.py
@@ -286,11 +286,19 @@ def autovacuum(self):
         """
         for channel in self.env["queue.job.channel"].search([]):
             deadline = datetime.now() - timedelta(days=int(channel.removal_interval))
-            jobs = self.search(
-                [("date_done", "<=", deadline), ("channel", "=", channel.complete_name)]
-            )
-            if jobs:
-                jobs.unlink()
+            while True:
+                jobs = self.search(
+                    [
+                        ("date_done", "<=", deadline),
+                        ("channel", "=", channel.complete_name),
+                    ],
+                    limit=1000,
+                )
+                if jobs:
+                    jobs.unlink()
+                    self.env.cr.commit()
+                else:
+                    break
         return True

     def requeue_stuck_jobs(self, enqueued_delta=5, started_delta=0):
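
Note: below is a minimal standalone sketch of the batch-delete pattern the
patch applies, written against sqlite3 instead of the Odoo ORM so it runs on
its own. The names (delete_in_batches, BATCH_SIZE, the queue_job table
schema) are illustrative assumptions, not part of the queue_job module.

    import sqlite3
    from datetime import datetime, timedelta

    BATCH_SIZE = 1000  # mirrors the limit=1000 used in the patch

    def delete_in_batches(conn, deadline):
        """Delete jobs done before `deadline`, committing after each batch.

        Committing per batch means a crash (e.g. a cron timeout) only loses
        the current batch; earlier batches stay deleted, so the next run
        starts with less work instead of retrying the whole delete.
        """
        total = 0
        while True:
            # Fetch at most one batch of matching ids (hypothetical schema).
            rows = conn.execute(
                "SELECT id FROM queue_job WHERE date_done <= ? LIMIT ?",
                (deadline.isoformat(), BATCH_SIZE),
            ).fetchall()
            if not rows:
                break
            conn.executemany(
                "DELETE FROM queue_job WHERE id = ?", [(r[0],) for r in rows]
            )
            conn.commit()  # persist this batch before fetching the next one
            total += len(rows)
        return total

    if __name__ == "__main__":
        conn = sqlite3.connect(":memory:")
        conn.execute(
            "CREATE TABLE queue_job (id INTEGER PRIMARY KEY, date_done TEXT)"
        )
        old = (datetime.now() - timedelta(days=60)).isoformat()
        conn.executemany(
            "INSERT INTO queue_job (date_done) VALUES (?)", [(old,)] * 2500
        )
        print(delete_in_batches(conn, datetime.now() - timedelta(days=30)))  # 2500

The per-batch commit trades transactional atomicity for progress durability:
each committed batch is final even if a later batch fails, which is exactly
what the cron needs here.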