From d4db8a79489988b303f299d73deab9283a6ae78d Mon Sep 17 00:00:00 2001
From: Florian da Costa
Date: Wed, 10 Feb 2021 13:34:45 +0100
Subject: [PATCH] Vacuum done jobs in batch

A huge number of jobs to delete may take a long time, and the cron may
crash, for instance because of a timeout. That would leave the cron
permanently running and permanently failing. Deleting in batches avoids
this issue: even if the cron fails, it will still have deleted part of
the job history.
---
 queue_job/models/queue_job.py | 18 +++++++++++++-----
 1 file changed, 13 insertions(+), 5 deletions(-)

diff --git a/queue_job/models/queue_job.py b/queue_job/models/queue_job.py
index fd848861df..40322795c9 100644
--- a/queue_job/models/queue_job.py
+++ b/queue_job/models/queue_job.py
@@ -254,11 +254,19 @@ def autovacuum(self):
         """
         for channel in self.env["queue.job.channel"].search([]):
             deadline = datetime.now() - timedelta(days=int(channel.removal_interval))
-            jobs = self.search(
-                [("date_done", "<=", deadline), ("channel", "=", channel.complete_name)]
-            )
-            if jobs:
-                jobs.unlink()
+            while True:
+                jobs = self.search(
+                    [
+                        ("date_done", "<=", deadline),
+                        ("channel", "=", channel.complete_name),
+                    ],
+                    limit=1000,
+                )
+                if jobs:
+                    jobs.unlink()
+                    self.env.cr.commit()
+                else:
+                    break
         return True
 
     def requeue_stuck_jobs(self, enqueued_delta=5, started_delta=0):
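
Note (not part of the patch): for readability, a sketch of the complete
autovacuum method as it reads once the hunk above is applied. The
docstring body is abridged, since the diff only shows its closing
quotes, and the batch size of 1000 is the value hardcoded in the hunk.

    def autovacuum(self):
        """Delete done jobs older than the channel's removal interval.

        Called from a cron.
        """
        for channel in self.env["queue.job.channel"].search([]):
            deadline = datetime.now() - timedelta(days=int(channel.removal_interval))
            while True:
                # Fetch at most 1000 expired jobs per iteration so a
                # single unlink() call never grows unbounded.
                jobs = self.search(
                    [
                        ("date_done", "<=", deadline),
                        ("channel", "=", channel.complete_name),
                    ],
                    limit=1000,
                )
                if jobs:
                    jobs.unlink()
                    # Commit after each batch: if the cron later times
                    # out, the batches already deleted stay deleted, so
                    # the next run starts with a smaller backlog.
                    self.env.cr.commit()
                else:
                    break
        return True

Design note: committing manually inside a cron method is normally
discouraged in Odoo, but here it is the point of the change; each
committed batch is progress that the next cron run does not have to
redo.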