diff --git a/queue_job/__manifest__.py b/queue_job/__manifest__.py
index ecf64fd8f3..57084deccc 100644
--- a/queue_job/__manifest__.py
+++ b/queue_job/__manifest__.py
@@ -3,7 +3,7 @@
 {
     "name": "Job Queue",
-    "version": "15.0.1.0.2",
+    "version": "15.0.1.1.0",
     "author": "Camptocamp,ACSONE SA/NV,Odoo Community Association (OCA)",
     "website": "https://github.com/OCA/queue",
     "license": "LGPL-3",
@@ -17,6 +17,7 @@
         "views/queue_job_channel_views.xml",
         "views/queue_job_function_views.xml",
         "wizards/queue_jobs_to_done_views.xml",
+        "wizards/queue_jobs_to_cancelled_views.xml",
         "wizards/queue_requeue_job_views.xml",
         "views/queue_job_menus.xml",
         "data/queue_data.xml",
diff --git a/queue_job/controllers/main.py b/queue_job/controllers/main.py
index a68897e7f4..33b5778476 100644
--- a/queue_job/controllers/main.py
+++ b/queue_job/controllers/main.py
@@ -74,10 +74,10 @@ def retry_postpone(job, message, seconds=None):
                 if err.pgcode not in PG_CONCURRENCY_ERRORS_TO_RETRY:
                     raise
-                retry_postpone(
-                    job, tools.ustr(err.pgerror, errors="replace"), seconds=PG_RETRY
-                )
                 _logger.debug("%s OperationalError, postponed", job)
+                raise RetryableJobError(
+                    tools.ustr(err.pgerror, errors="replace"), seconds=PG_RETRY
+                ) from err

         except NothingToDoJob as err:
             if str(err):
@@ -92,23 +92,42 @@ def retry_postpone(job, message, seconds=None):
             # delay the job later, requeue
             retry_postpone(job, str(err), seconds=err.seconds)
             _logger.debug("%s postponed", job)
+            # Do not propagate the error: we don't want an exception traceback
+            # in the logs here; the traceback should only show up once all
+            # retries are exhausted
+            env.cr.rollback()

-        except (FailedJobError, Exception):
+        except (FailedJobError, Exception) as orig_exception:
             buff = StringIO()
             traceback.print_exc(file=buff)
-            _logger.error(buff.getvalue())
+            traceback_txt = buff.getvalue()
+            _logger.error(traceback_txt)
             job.env.clear()
             with registry(job.env.cr.dbname).cursor() as new_cr:
-                job.env = api.Environment(new_cr, SUPERUSER_ID, {})
-                job.set_failed(exc_info=buff.getvalue())
+                job.env = job.env(cr=new_cr)
+                vals = self._get_failure_values(job, traceback_txt, orig_exception)
+                job.set_failed(**vals)
                 job.store()
+                buff.close()
             raise

         return ""

+    def _get_failure_values(self, job, traceback_txt, orig_exception):
+        """Collect relevant data from the exception."""
+        exception_name = orig_exception.__class__.__name__
+        if hasattr(orig_exception, "__module__"):
+            exception_name = orig_exception.__module__ + "." + exception_name
+        exc_message = getattr(orig_exception, "name", str(orig_exception))
+        return {
+            "exc_info": traceback_txt,
+            "exc_name": exception_name,
+            "exc_message": exc_message,
+        }
+
     @http.route("/queue_job/create_test_job", type="http", auth="user")
     def create_test_job(
-        self, priority=None, max_retries=None, channel="root", description="Test job"
+        self, priority=None, max_retries=None, channel=None, description="Test job"
     ):
         if not http.request.env.user.has_group("base.group_erp_manager"):
             raise Forbidden(_("Access Denied"))
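For reference, the extraction performed by `_get_failure_values()` can be sketched standalone; the `MissingSchema` class below is a locally defined stand-in, not the real `requests` exception:

```python
# Minimal sketch of the name/message extraction done by _get_failure_values()
# (the job and traceback arguments are omitted; MissingSchema is a stand-in).
class MissingSchema(Exception):
    __module__ = "requests.exceptions"


try:
    raise MissingSchema("Invalid URL 'foo': no schema supplied")
except Exception as orig_exception:
    exception_name = orig_exception.__class__.__name__
    if hasattr(orig_exception, "__module__"):
        exception_name = orig_exception.__module__ + "." + exception_name
    # Odoo exceptions expose a (deprecated) `name`; others fall back to str()
    exc_message = getattr(orig_exception, "name", str(orig_exception))

print(exception_name)  # requests.exceptions.MissingSchema
print(exc_message)     # Invalid URL 'foo': no schema supplied
```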
diff --git a/queue_job/job.py b/queue_job/job.py
index 4ee90390e6..794c7fb030 100644
--- a/queue_job/job.py
+++ b/queue_job/job.py
@@ -16,6 +16,7 @@
 PENDING = "pending"
 ENQUEUED = "enqueued"
+CANCELLED = "cancelled"
 DONE = "done"
 STARTED = "started"
 FAILED = "failed"
@@ -25,6 +26,7 @@
     (ENQUEUED, "Enqueued"),
     (STARTED, "Started"),
     (DONE, "Done"),
+    (CANCELLED, "Cancelled"),
     (FAILED, "Failed"),
 ]
@@ -214,6 +216,14 @@ class Job(object):

         A description of the result (for humans).

+    .. attribute:: exc_name
+
+        Exception error name when the job failed.
+
+    .. attribute:: exc_message
+
+        Exception error message when the job failed.
+
     .. attribute:: exc_info

         Exception information (traceback) when the job failed.
@@ -293,6 +303,9 @@ def _load_from_db_record(cls, job_db_record):
         if stored.date_done:
             job_.date_done = stored.date_done
+        if stored.date_cancelled:
+            job_.date_cancelled = stored.date_cancelled
+
         job_.state = stored.state
         job_.result = stored.result if stored.result else None
         job_.exc_info = stored.exc_info if stored.exc_info else None
@@ -441,13 +454,7 @@ def __init__(
         self.job_model_name = "queue.job"

         self.job_config = (
-            self.env["queue.job.function"]
-            .sudo()
-            .job_config(
-                self.env["queue.job.function"].job_function_name(
-                    self.model_name, self.method_name
-                )
-            )
+            self.env["queue.job.function"].sudo().job_config(self.job_function_name)
         )

         self.state = PENDING
@@ -482,8 +489,11 @@ def __init__(
         self.date_enqueued = None
         self.date_started = None
         self.date_done = None
+        self.date_cancelled = None

         self.result = None
+        self.exc_name = None
+        self.exc_message = None
         self.exc_info = None

         if "company_id" in env.context:
@@ -524,17 +534,37 @@ def perform(self):

     def store(self):
         """Store the Job"""
+        job_model = self.env["queue.job"]
+        # The sentinel is used to prevent editing sensitive fields (such as
+        # method_name) from RPC methods.
+        edit_sentinel = job_model.EDIT_SENTINEL
+
+        db_record = self.db_record()
+        if db_record:
+            db_record.with_context(_job_edit_sentinel=edit_sentinel).write(
+                self._store_values()
+            )
+        else:
+            job_model.with_context(_job_edit_sentinel=edit_sentinel).sudo().create(
+                self._store_values(create=True)
+            )
+
+    def _store_values(self, create=False):
         vals = {
             "state": self.state,
             "priority": self.priority,
             "retry": self.retry,
             "max_retries": self.max_retries,
+            "exc_name": self.exc_name,
+            "exc_message": self.exc_message,
             "exc_info": self.exc_info,
             "company_id": self.company_id,
             "result": str(self.result) if self.result else False,
             "date_enqueued": False,
             "date_started": False,
             "date_done": False,
+            "exec_time": False,
+            "date_cancelled": False,
             "eta": False,
             "identity_key": False,
             "worker_pid": self.worker_pid,
@@ -546,40 +576,61 @@ def store(self):
             vals["date_started"] = self.date_started
         if self.date_done:
             vals["date_done"] = self.date_done
+        if self.exec_time:
+            vals["exec_time"] = self.exec_time
+        if self.date_cancelled:
+            vals["date_cancelled"] = self.date_cancelled
         if self.eta:
             vals["eta"] = self.eta
         if self.identity_key:
             vals["identity_key"] = self.identity_key

-        job_model = self.env["queue.job"]
-        # The sentinel is used to prevent edition sensitive fields (such as
-        # method_name) from RPC methods.
-        edit_sentinel = job_model.EDIT_SENTINEL
-
-        db_record = self.db_record()
-        if db_record:
-            db_record.with_context(_job_edit_sentinel=edit_sentinel).write(vals)
-        else:
-            date_created = self.date_created
-            # The following values must never be modified after the
-            # creation of the job
+        if create:
             vals.update(
                 {
+                    "user_id": self.env.uid,
+                    "channel": self.channel,
+                    # The following values must never be modified after the
+                    # creation of the job
                     "uuid": self.uuid,
                     "name": self.description,
-                    "date_created": date_created,
+                    "func_string": self.func_string,
+                    "date_created": self.date_created,
+                    "model_name": self.recordset._name,
                     "method_name": self.method_name,
+                    "job_function_id": self.job_config.job_function_id,
+                    "channel_method_name": self.job_function_name,
                     "records": self.recordset,
                     "args": self.args,
                     "kwargs": self.kwargs,
                 }
             )
-            # it the channel is not specified, lets the job_model compute
-            # the right one to use
-            if self.channel:
-                vals.update({"channel": self.channel})

-        job_model.with_context(_job_edit_sentinel=edit_sentinel).sudo().create(vals)
+        vals_from_model = self._store_values_from_model()
+        # Sanitize values: make sure model-provided values cannot override
+        # the core values
+        vals_from_model = {k: v for k, v in vals_from_model.items() if k not in vals}
+        vals.update(vals_from_model)
+        return vals
+
+    def _store_values_from_model(self):
+        vals = {}
+        value_handlers_candidates = (
+            "_job_store_values_for_" + self.method_name,
+            "_job_store_values",
+        )
+        for candidate in value_handlers_candidates:
+            handler = getattr(self.recordset, candidate, None)
+            if handler is not None:
+                vals = handler(self)
+                break
+        return vals
+
+    @property
+    def func_string(self):
+        model = repr(self.recordset)
+        args = [repr(arg) for arg in self.args]
+        kwargs = ["{}={!r}".format(key, val) for key, val in self.kwargs.items()]
+        all_args = ", ".join(args + kwargs)
+        return "{}.{}({})".format(model, self.method_name, all_args)

     def db_record(self):
         return self.db_record_from_uuid(self.env, self.uuid)
@@ -589,6 +640,11 @@ def func(self):
         recordset = self.recordset.with_context(job_uuid=self.uuid)
         return getattr(recordset, self.method_name)

+    @property
+    def job_function_name(self):
+        func_model = self.env["queue.job.function"].sudo()
+        return func_model.job_function_name(self.recordset._name, self.method_name)
+
     @property
     def identity_key(self):
         if self._identity_key is None:
@@ -646,11 +702,27 @@ def eta(self, value):
         else:
             self._eta = value

+    @property
+    def channel(self):
+        return self._channel or self.job_config.channel
+
+    @channel.setter
+    def channel(self, value):
+        self._channel = value
+
+    @property
+    def exec_time(self):
+        if self.date_done and self.date_started:
+            return (self.date_done - self.date_started).total_seconds()
+        return None
+
     def set_pending(self, result=None, reset_retry=True):
         self.state = PENDING
         self.date_enqueued = None
         self.date_started = None
+        self.date_done = None
         self.worker_pid = None
+        self.date_cancelled = None
         if reset_retry:
             self.retry = 0
         if result is not None:
@@ -669,15 +741,23 @@ def set_started(self):

     def set_done(self, result=None):
         self.state = DONE
+        self.exc_name = None
         self.exc_info = None
         self.date_done = datetime.now()
         if result is not None:
             self.result = result

-    def set_failed(self, exc_info=None):
+    def set_cancelled(self, result=None):
+        self.state = CANCELLED
+        self.date_cancelled = datetime.now()
+        if result is not None:
+            self.result = result
+
+    def set_failed(self, **kw):
         self.state = FAILED
-        if exc_info is not None:
-            self.exc_info = exc_info
+        for k, v in kw.items():
+            if v is not None:
+                setattr(self, k, v)

     def __repr__(self):
         return "<Job %s, priority:%d>" % (self.uuid, self.priority)
@@ -708,6 +788,7 @@ def postpone(self, result=None, seconds=None):
         """
         eta_seconds = self._get_retry_seconds(seconds)
         self.eta = timedelta(seconds=eta_seconds)
+        self.exc_name = None
         self.exc_info = None
         if result is not None:
             self.result = result
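The two new `Job` helpers can be illustrated in isolation, with hand-picked datetimes and a hand-written recordset repr standing in for a real job:

```python
# Standalone illustration of Job.exec_time and Job.func_string.
from datetime import datetime

date_started = datetime(2022, 1, 1, 12, 0, 0)
date_done = datetime(2022, 1, 1, 12, 0, 42)
# exec_time: seconds elapsed between start and done
print((date_done - date_started).total_seconds())  # 42.0

# func_string: human-readable rendering of the delayed call
model = "res.partner(7,)"  # stands in for repr(self.recordset)
args = [repr(arg) for arg in ({"active": True},)]
kwargs = ["{}={!r}".format(k, v) for k, v in {}.items()]
print("{}.{}({})".format(model, "write", ", ".join(args + kwargs)))
# res.partner(7,).write({'active': True})
```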
diff --git a/queue_job/jobrunner/channels.py b/queue_job/jobrunner/channels.py
index 536fec5f68..2d7e0a8be0 100644
--- a/queue_job/jobrunner/channels.py
+++ b/queue_job/jobrunner/channels.py
@@ -942,7 +942,9 @@ def get_channel_from_config(self, config):
         _logger.info("Configured channel: %s", channel)
         return channel

-    def get_channel_by_name(self, channel_name, autocreate=False):
+    def get_channel_by_name(
+        self, channel_name, autocreate=False, parent_fallback=False
+    ):
         """Return a Channel object by its name.

         If it does not exist and autocreate is True, it is created
@@ -980,6 +982,9 @@ def get_channel_by_name(self, channel_name, autocreate=False):
         >>> c = cm.get_channel_by_name('sub')
         >>> c.fullname
         'root.sub'
+        >>> c = cm.get_channel_by_name('root.sub.not.configured', parent_fallback=True)
+        >>> c.fullname
+        'root.sub.sub.not.configured'
         """
         if not channel_name or channel_name == self._root_channel.name:
             return self._root_channel
@@ -987,9 +992,26 @@ def get_channel_by_name(self, channel_name, autocreate=False):
             channel_name = self._root_channel.name + "." + channel_name
         if channel_name in self._channels_by_name:
             return self._channels_by_name[channel_name]
-        if not autocreate:
+        if not autocreate and not parent_fallback:
             raise ChannelNotFound("Channel %s not found" % channel_name)
         parent = self._root_channel
+        if parent_fallback:
+            # Look for the first direct parent with a configuration.
+            # E.g. `root.edi.foo.baz` will fall back on `root.edi.foo`,
+            # `root.edi` or `root`, in sequence.
+            parent_name = channel_name
+            while True:
+                parent_name = parent_name.rsplit(".", 1)[:-1][0]
+                if parent_name == self._root_channel.name:
+                    break
+                if parent_name in self._channels_by_name:
+                    parent = self._channels_by_name[parent_name]
+                    _logger.debug(
+                        "%s has no specific configuration: using %s",
+                        channel_name,
+                        parent_name,
+                    )
+                    break
         for subchannel_name in channel_name.split(".")[1:]:
             subchannel = parent.get_subchannel_by_name(subchannel_name)
             if not subchannel:
@@ -1001,13 +1023,7 @@ def get_channel_by_name(self, channel_name, autocreate=False):
     def notify(
         self, db_name, channel_name, uuid, seq, date_created, priority, eta, state
     ):
-        try:
-            channel = self.get_channel_by_name(channel_name)
-        except ChannelNotFound:
-            _logger.warning(
-                "unknown channel %s, using root channel for job %s", channel_name, uuid
-            )
-            channel = self._root_channel
+        channel = self.get_channel_by_name(channel_name, parent_fallback=True)
         job = self._jobs_by_uuid.get(uuid)
         if job:
             # db_name is invariant
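The parent lookup boils down to the following standalone sketch, where `configured` stands in for `self._channels_by_name`:

```python
# Isolated sketch of the parent_fallback walk added above.
configured = {"root.edi"}  # stands in for self._channels_by_name

def find_configured_parent(channel_name, root="root"):
    parent_name = channel_name
    while True:
        parent_name = parent_name.rsplit(".", 1)[0]
        if parent_name == root:
            return root
        if parent_name in configured:
            return parent_name

print(find_configured_parent("root.edi.foo.baz"))  # root.edi
print(find_configured_parent("root.other.sub"))    # root
```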
diff --git a/queue_job/migrations/15.0.1.1.0/post-migration.py b/queue_job/migrations/15.0.1.1.0/post-migration.py
new file mode 100644
index 0000000000..f6eff72707
--- /dev/null
+++ b/queue_job/migrations/15.0.1.1.0/post-migration.py
@@ -0,0 +1,47 @@
+# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html)
+
+import logging
+
+from odoo import SUPERUSER_ID, api
+
+_logger = logging.getLogger(__name__)
+
+
+def migrate(cr, version):
+    with api.Environment.manage():
+        env = api.Environment(cr, SUPERUSER_ID, {})
+        _logger.info("Computing exception name for failed jobs")
+        _compute_jobs_new_values(env)
+
+
+def _compute_jobs_new_values(env):
+    for job in env["queue.job"].search(
+        [("state", "=", "failed"), ("exc_info", "!=", False)]
+    ):
+        exception_details = _get_exception_details(job)
+        if exception_details:
+            job.update(exception_details)
+
+
+def _get_exception_details(job):
+    for line in reversed(job.exc_info.splitlines()):
+        if _find_exception(line):
+            name, msg = line.split(":", 1)
+            return {
+                "exc_name": name.strip(),
+                "exc_message": msg.strip("()', \""),
+            }
+
+
+def _find_exception(line):
+    # Just a list of common errors.
+    # If you want to target others, add your own migration step for your db.
+    exceptions = (
+        "Error:",  # catch all well named exceptions
+        # other live instance errors found
+        "requests.exceptions.MissingSchema",
+        "botocore.errorfactory.NoSuchKey",
+    )
+    for exc in exceptions:
+        if exc in line:
+            return exc
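Applied to a made-up stored traceback, the parsing above recovers the two new columns like so:

```python
# The migration's parsing applied to a hand-written exc_info sample;
# scanning in reverse means the last exception line wins.
exc_info = """Traceback (most recent call last):
  File "/odoo/addons/queue_job/job.py", line 1, in perform
    self.result = self.func(*tuple(self.args), **self.kwargs)
odoo.exceptions.UserError: ('Buy more coffee', '')"""

for line in reversed(exc_info.splitlines()):
    if "Error:" in line:
        name, msg = line.split(":", 1)
        print(name.strip())          # odoo.exceptions.UserError
        print(msg.strip("()', \""))  # Buy more coffee
        break
```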
[("state", "=", "failed"), ("exc_info", "!=", False)] + ): + exception_details = _get_exception_details(job) + if exception_details: + job.update(exception_details) + + +def _get_exception_details(job): + for line in reversed(job.exc_info.splitlines()): + if _find_exception(line): + name, msg = line.split(":", 1) + return { + "exc_name": name.strip(), + "exc_message": msg.strip("()', \""), + } + + +def _find_exception(line): + # Just a list of common errors. + # If you want to target others, add your own migration step for your db. + exceptions = ( + "Error:", # catch all well named exceptions + # other live instance errors found + "requests.exceptions.MissingSchema", + "botocore.errorfactory.NoSuchKey", + ) + for exc in exceptions: + if exc in line: + return exc diff --git a/queue_job/migrations/15.0.1.1.0/pre-migration.py b/queue_job/migrations/15.0.1.1.0/pre-migration.py new file mode 100644 index 0000000000..8ae6cb3a5f --- /dev/null +++ b/queue_job/migrations/15.0.1.1.0/pre-migration.py @@ -0,0 +1,33 @@ +# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html) + +from odoo.tools.sql import column_exists, table_exists + + +def migrate(cr, version): + if table_exists(cr, "queue_job") and not column_exists( + cr, "queue_job", "exec_time" + ): + # Disable trigger otherwise the update takes ages. + cr.execute( + """ + ALTER TABLE queue_job DISABLE TRIGGER queue_job_notify; + """ + ) + cr.execute( + """ + ALTER TABLE queue_job ADD COLUMN exec_time double precision DEFAULT 0; + """ + ) + cr.execute( + """ + UPDATE + queue_job + SET + exec_time = EXTRACT(EPOCH FROM (date_done - date_started)); + """ + ) + cr.execute( + """ + ALTER TABLE queue_job ENABLE TRIGGER queue_job_notify; + """ + ) diff --git a/queue_job/models/base.py b/queue_job/models/base.py index 5b1f8fee79..a398bf85fb 100644 --- a/queue_job/models/base.py +++ b/queue_job/models/base.py @@ -5,7 +5,7 @@ import logging import os -from odoo import models +from odoo import api, models from ..job import DelayableRecordset @@ -189,3 +189,19 @@ def auto_delay_wrapper(self, *args, **kwargs): origin = getattr(self, method_name) return functools.update_wrapper(auto_delay_wrapper, origin) + + @api.model + def _job_store_values(self, job): + """Hook for manipulating job stored values. + + You can define a more specific hook for a job function + by defining a method name with this pattern: + + `_queue_job_store_values_${func_name}` + + NOTE: values will be stored only if they match stored fields on `queue.job`. + + :param job: current queue_job.job.Job instance. + :return: dictionary for setting job values. 
+ """ + return {} diff --git a/queue_job/models/queue_job.py b/queue_job/models/queue_job.py index 157e4d8cbc..b6f8b52a6e 100644 --- a/queue_job/models/queue_job.py +++ b/queue_job/models/queue_job.py @@ -8,7 +8,7 @@ from odoo.osv import expression from ..fields import JobSerialized -from ..job import DONE, PENDING, STATES, Job +from ..job import CANCELLED, DONE, PENDING, STATES, Job _logger = logging.getLogger(__name__) @@ -37,27 +37,22 @@ class QueueJob(models.Model): "date_created", "model_name", "method_name", + "func_string", + "channel_method_name", + "job_function_id", "records", "args", "kwargs", ) uuid = fields.Char(string="UUID", readonly=True, index=True, required=True) - user_id = fields.Many2one( - comodel_name="res.users", - string="User ID", - compute="_compute_user_id", - inverse="_inverse_user_id", - store=True, - ) + user_id = fields.Many2one(comodel_name="res.users", string="User ID") company_id = fields.Many2one( comodel_name="res.company", string="Company", index=True ) name = fields.Char(string="Description", readonly=True) - model_name = fields.Char( - string="Model", compute="_compute_model_name", store=True, readonly=True - ) + model_name = fields.Char(string="Model", readonly=True) method_name = fields.Char(readonly=True) # record_ids field is only for backward compatibility (e.g. used in related # actions), can be removed (replaced by "records") in 14.0 @@ -69,12 +64,12 @@ class QueueJob(models.Model): ) args = JobSerialized(readonly=True, base_type=tuple) kwargs = JobSerialized(readonly=True, base_type=dict) - func_string = fields.Char( - string="Task", compute="_compute_func_string", readonly=True, store=True - ) + func_string = fields.Char(string="Task", readonly=True) state = fields.Selection(STATES, readonly=True, required=True, index=True) priority = fields.Integer() + exc_name = fields.Char(string="Exception", readonly=True) + exc_message = fields.Char(string="Exception Message", readonly=True) exc_info = fields.Text(string="Exception Info", readonly=True) result = fields.Text(readonly=True) @@ -82,6 +77,12 @@ class QueueJob(models.Model): date_started = fields.Datetime(string="Start Date", readonly=True) date_enqueued = fields.Datetime(string="Enqueue Time", readonly=True) date_done = fields.Datetime(readonly=True) + exec_time = fields.Float( + string="Execution Time (avg)", + group_operator="avg", + help="Time required to execute this job in seconds. Average when grouped.", + ) + date_cancelled = fields.Datetime(readonly=True) eta = fields.Datetime(string="Execute only after") retry = fields.Integer(string="Current try") @@ -91,24 +92,18 @@ class QueueJob(models.Model): "max. 
retries.\n" "Retries are infinite when empty.", ) - channel_method_name = fields.Char( - readonly=True, compute="_compute_job_function", store=True - ) + # FIXME the name of this field is very confusing + channel_method_name = fields.Char(readonly=True) job_function_id = fields.Many2one( comodel_name="queue.job.function", - compute="_compute_job_function", string="Job Function", readonly=True, - store=True, ) - override_channel = fields.Char() - channel = fields.Char( - compute="_compute_channel", inverse="_inverse_channel", store=True, index=True - ) + channel = fields.Char(index=True) - identity_key = fields.Char() - worker_pid = fields.Integer() + identity_key = fields.Char(readonly=True) + worker_pid = fields.Integer(readonly=True) def init(self): self._cr.execute( @@ -122,67 +117,23 @@ def init(self): "'enqueued') AND identity_key IS NOT NULL;" ) - @api.depends("records") - def _compute_user_id(self): - for record in self: - record.user_id = record.records.env.uid - - def _inverse_user_id(self): - for record in self.with_context(_job_edit_sentinel=self.EDIT_SENTINEL): - record.records = record.records.with_user(record.user_id.id) - - @api.depends("records") - def _compute_model_name(self): - for record in self: - record.model_name = record.records._name - @api.depends("records") def _compute_record_ids(self): for record in self: record.record_ids = record.records.ids - def _inverse_channel(self): - for record in self: - record.override_channel = record.channel - - @api.depends("job_function_id.channel_id") - def _compute_channel(self): - for record in self: - channel = ( - record.override_channel or record.job_function_id.channel or "root" - ) - if record.channel != channel: - record.channel = channel - - @api.depends("model_name", "method_name", "job_function_id.channel_id") - def _compute_job_function(self): - for record in self: - func_model = self.env["queue.job.function"] - channel_method_name = func_model.job_function_name( - record.model_name, record.method_name - ) - function = func_model.search([("name", "=", channel_method_name)], limit=1) - record.channel_method_name = channel_method_name - record.job_function_id = function - - @api.depends("model_name", "method_name", "records", "args", "kwargs") - def _compute_func_string(self): - for record in self: - model = repr(record.records) - args = [repr(arg) for arg in record.args] - kwargs = ["{}={!r}".format(key, val) for key, val in record.kwargs.items()] - all_args = ", ".join(args + kwargs) - record.func_string = "{}.{}({})".format(model, record.method_name, all_args) - @api.model_create_multi def create(self, vals_list): if self.env.context.get("_job_edit_sentinel") is not self.EDIT_SENTINEL: # Prevent to create a queue.job record "raw" from RPC. # ``with_delay()`` must be used. 
diff --git a/queue_job/models/queue_job_function.py b/queue_job/models/queue_job_function.py
index db9eea3c94..4f351659bd 100644
--- a/queue_job/models/queue_job_function.py
+++ b/queue_job/models/queue_job_function.py
@@ -27,7 +27,8 @@ class QueueJobFunction(models.Model):
         "retry_pattern "
         "related_action_enable "
         "related_action_func_name "
-        "related_action_kwargs ",
+        "related_action_kwargs "
+        "job_function_id ",
     )

     def _default_channel(self):
@@ -147,6 +148,7 @@ def job_default_config(self):
             related_action_enable=True,
             related_action_func_name=None,
             related_action_kwargs={},
+            job_function_id=None,
         )

     def _parse_retry_pattern(self):
@@ -179,6 +181,7 @@ def job_config(self, name):
             related_action_enable=config.related_action.get("enable", True),
             related_action_func_name=config.related_action.get("func_name"),
             related_action_kwargs=config.related_action.get("kwargs", {}),
+            job_function_id=config.id,
         )

     def _retry_pattern_format_error_message(self):
diff --git a/queue_job/readme/CONTRIBUTORS.rst b/queue_job/readme/CONTRIBUTORS.rst
index 0f8bb1a3b2..4b34823abe 100644
--- a/queue_job/readme/CONTRIBUTORS.rst
+++ b/queue_job/readme/CONTRIBUTORS.rst
@@ -9,3 +9,4 @@
 * Tatiana Deribina
 * Souheil Bejaoui
 * Eric Antones
+* Simone Orsi
diff --git a/queue_job/readme/USAGE.rst b/queue_job/readme/USAGE.rst
index 6c472eccf9..b595a9c595 100644
--- a/queue_job/readme/USAGE.rst
+++ b/queue_job/readme/USAGE.rst
@@ -43,6 +43,13 @@ they have different xmlids. On uninstall, the merged record is deleted
 when all the modules using it are uninstalled.

+**Job function: model**
+
+If the function is defined in an abstract model, you cannot write
+``<field name="model_id" ref="..."/>``
+but you have to define a job function for each model that inherits from
+the abstract model.
+
+
 **Job function: channel**

 The channel where the job will be delayed. The default channel is ``root``.
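The same declaration can also be done from Python; a made-up example for one concrete inheriting model, to be repeated per model (`my_abstract_method` is hypothetical):

```python
# Made-up example: declare the job function on a concrete model
# (res.partner here), once per model inheriting the abstract one.
env["queue.job.function"].create(
    {
        "model_id": env.ref("base.model_res_partner").id,  # concrete model
        "method": "my_abstract_method",  # hypothetical method name
        "channel_id": env.ref("queue_job.channel_root").id,
    }
)
```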
diff --git a/queue_job/security/ir.model.access.csv b/queue_job/security/ir.model.access.csv
index 9242305158..634daf8ede 100644
--- a/queue_job/security/ir.model.access.csv
+++ b/queue_job/security/ir.model.access.csv
@@ -4,3 +4,4 @@ access_queue_job_function_manager,queue job functions manager,queue_job.model_qu
 access_queue_job_channel_manager,queue job channel manager,queue_job.model_queue_job_channel,queue_job.group_queue_job_manager,1,1,1,1
 access_queue_requeue_job,queue requeue job manager,queue_job.model_queue_requeue_job,queue_job.group_queue_job_manager,1,1,1,1
 access_queue_jobs_to_done,queue jobs to done manager,queue_job.model_queue_jobs_to_done,queue_job.group_queue_job_manager,1,1,1,1
+access_queue_jobs_to_cancelled,queue jobs to cancelled manager,queue_job.model_queue_jobs_to_cancelled,queue_job.group_queue_job_manager,1,1,1,1
diff --git a/queue_job/tests/__init__.py b/queue_job/tests/__init__.py
index 10138c469e..a556f10d83 100644
--- a/queue_job/tests/__init__.py
+++ b/queue_job/tests/__init__.py
@@ -4,3 +4,4 @@
 from . import test_model_job_channel
 from . import test_model_job_function
 from . import test_queue_job_protected_write
+from . import test_wizards
diff --git a/queue_job/tests/test_model_job_function.py b/queue_job/tests/test_model_job_function.py
index 965e26d8f2..84676fdb65 100644
--- a/queue_job/tests/test_model_job_function.py
+++ b/queue_job/tests/test_model_job_function.py
@@ -31,7 +31,7 @@ def test_function_job_config(self):
         channel = self.env["queue.job.channel"].create(
             {"name": "foo", "parent_id": self.env.ref("queue_job.channel_root").id}
         )
-        self.env["queue.job.function"].create(
+        job_function = self.env["queue.job.function"].create(
            {
                 "model_id": self.env.ref("base.model_res_users").id,
                 "method": "read",
@@ -52,5 +52,6 @@ def test_function_job_config(self):
                 related_action_enable=True,
                 related_action_func_name="related_action_foo",
                 related_action_kwargs={"b": 1},
+                job_function_id=job_function.id,
             ),
         )
diff --git a/queue_job/tests/test_wizards.py b/queue_job/tests/test_wizards.py
new file mode 100644
index 0000000000..2ac162d313
--- /dev/null
+++ b/queue_job/tests/test_wizards.py
@@ -0,0 +1,48 @@
+# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html)
+from odoo.tests import common
+
+
+class TestWizards(common.TransactionCase):
+    def setUp(self):
+        super().setUp()
+        self.job = (
+            self.env["queue.job"]
+            .with_context(
+                _job_edit_sentinel=self.env["queue.job"].EDIT_SENTINEL,
+            )
+            .create(
+                {
+                    "uuid": "test",
+                    "user_id": self.env.user.id,
+                    "state": "failed",
+                    "model_name": "queue.job",
+                    "method_name": "write",
+                    "args": (),
+                }
+            )
+        )
+
+    def _wizard(self, model_name):
+        return (
+            self.env[model_name]
+            .with_context(
+                active_model=self.job._name,
+                active_ids=self.job.ids,
+            )
+            .create({})
+        )
+
+    def test_01_requeue(self):
+        wizard = self._wizard("queue.requeue.job")
+        wizard.requeue()
+        self.assertEqual(self.job.state, "pending")
+
+    def test_02_cancel(self):
+        wizard = self._wizard("queue.jobs.to.cancelled")
+        wizard.set_cancelled()
+        self.assertEqual(self.job.state, "cancelled")
+
+    def test_03_done(self):
+        wizard = self._wizard("queue.jobs.to.done")
+        wizard.set_done()
+        self.assertEqual(self.job.state, "done")
diff --git a/queue_job/views/queue_job_views.xml b/queue_job/views/queue_job_views.xml
index a68c03d34f..cf049fdfeb 100644
--- a/queue_job/views/queue_job_views.xml
+++ b/queue_job/views/queue_job_views.xml
@@ -23,12 +23,20 @@
                         type="object"
                         groups="queue_job.group_queue_job_manager"
                     />
+