diff --git a/queue_job/__manifest__.py b/queue_job/__manifest__.py
index 2e5a2ef610..d100873272 100644
--- a/queue_job/__manifest__.py
+++ b/queue_job/__manifest__.py
@@ -3,7 +3,7 @@
 {
     "name": "Job Queue",
-    "version": "14.0.1.3.4",
+    "version": "14.0.1.4.0",
     "author": "Camptocamp,ACSONE SA/NV,Odoo Community Association (OCA)",
     "website": "https://github.com/OCA/queue",
     "license": "LGPL-3",
diff --git a/queue_job/controllers/main.py b/queue_job/controllers/main.py
index 8078285ff3..1b35c1ac99 100644
--- a/queue_job/controllers/main.py
+++ b/queue_job/controllers/main.py
@@ -77,10 +77,10 @@ def retry_postpone(job, message, seconds=None):
             if err.pgcode not in PG_CONCURRENCY_ERRORS_TO_RETRY:
                 raise
-            retry_postpone(
-                job, tools.ustr(err.pgerror, errors="replace"), seconds=PG_RETRY
-            )
             _logger.debug("%s OperationalError, postponed", job)
+            raise RetryableJobError(
+                tools.ustr(err.pgerror, errors="replace"), seconds=PG_RETRY
+            )
 
         except NothingToDoJob as err:
             if str(err):
@@ -95,25 +95,44 @@ def retry_postpone(job, message, seconds=None):
             # delay the job later, requeue
             retry_postpone(job, str(err), seconds=err.seconds)
             _logger.debug("%s postponed", job)
+            # Do not propagate the error: we don't want an exception traceback
+            # in the logs here; the traceback should only show up once all
+            # retries are exhausted
+            env.cr.rollback()
 
-        except (FailedJobError, Exception):
+        except (FailedJobError, Exception) as orig_exception:
             buff = StringIO()
             traceback.print_exc(file=buff)
-            _logger.error(buff.getvalue())
+            traceback_txt = buff.getvalue()
+            _logger.error(traceback_txt)
             job.env.clear()
             with odoo.api.Environment.manage():
                 with odoo.registry(job.env.cr.dbname).cursor() as new_cr:
                     job.env = job.env(cr=new_cr)
-                    job.set_failed(exc_info=buff.getvalue())
+                    vals = self._get_failure_values(job, traceback_txt, orig_exception)
+                    job.set_failed(**vals)
                     job.store()
                     new_cr.commit()
+            buff.close()
             raise
 
         return ""
 
+    def _get_failure_values(self, job, traceback_txt, orig_exception):
+        """Collect exception details to store on the failed job."""
+        exception_name = orig_exception.__class__.__name__
+        if hasattr(orig_exception, "__module__"):
+            exception_name = orig_exception.__module__ + "." + exception_name
+        exc_message = getattr(orig_exception, "name", str(orig_exception))
+        return {
+            "exc_info": traceback_txt,
+            "exc_name": exception_name,
+            "exc_message": exc_message,
+        }
+
     @http.route("/queue_job/create_test_job", type="http", auth="user")
     def create_test_job(
-        self, priority=None, max_retries=None, channel="root", description="Test job"
+        self, priority=None, max_retries=None, channel=None, description="Test job"
    ):
         if not http.request.env.user.has_group("base.group_erp_manager"):
             raise Forbidden(_("Access Denied"))
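The new `_get_failure_values` hook makes the stored failure metadata overridable. A minimal downstream sketch (the subclass and the truncation policy are hypothetical; `RunJobController` is assumed to be the controller class patched above):

```python
# Hypothetical downstream module: trims the stored message while keeping
# the full traceback in exc_info. Not part of this patch.
from odoo.addons.queue_job.controllers.main import RunJobController


class TrimmedFailureController(RunJobController):
    def _get_failure_values(self, job, traceback_txt, orig_exception):
        vals = super()._get_failure_values(job, traceback_txt, orig_exception)
        # Keep the short message short; exc_info still holds everything.
        vals["exc_message"] = (vals["exc_message"] or "")[:200]
        return vals
```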
diff --git a/queue_job/job.py b/queue_job/job.py
index 2145b62cb8..a4dc224b85 100644
--- a/queue_job/job.py
+++ b/queue_job/job.py
@@ -214,6 +214,14 @@ class Job(object):
 
         A description of the result (for humans).
 
+    .. attribute:: exc_name
+
+        Exception class name when the job failed.
+
+    .. attribute:: exc_message
+
+        Exception message when the job failed.
+
     .. attribute:: exc_info
 
         Exception information (traceback) when the job failed.
@@ -441,13 +449,7 @@ def __init__(
         self.job_model_name = "queue.job"
 
         self.job_config = (
-            self.env["queue.job.function"]
-            .sudo()
-            .job_config(
-                self.env["queue.job.function"].job_function_name(
-                    self.model_name, self.method_name
-                )
-            )
+            self.env["queue.job.function"].sudo().job_config(self.job_function_name)
         )
 
         self.state = PENDING
@@ -484,6 +486,8 @@ def __init__(
         self.date_done = None
 
         self.result = None
+        self.exc_name = None
+        self.exc_message = None
         self.exc_info = None
 
         if "company_id" in env.context:
@@ -524,17 +528,36 @@ def perform(self):
 
     def store(self):
         """Store the Job"""
+        job_model = self.env["queue.job"]
+        # The sentinel is used to prevent editing sensitive fields (such as
+        # method_name) from RPC methods.
+        edit_sentinel = job_model.EDIT_SENTINEL
+
+        db_record = self.db_record()
+        if db_record:
+            db_record.with_context(_job_edit_sentinel=edit_sentinel).write(
+                self._store_values()
+            )
+        else:
+            job_model.with_context(_job_edit_sentinel=edit_sentinel).sudo().create(
+                self._store_values(create=True)
+            )
+
+    def _store_values(self, create=False):
         vals = {
             "state": self.state,
             "priority": self.priority,
             "retry": self.retry,
             "max_retries": self.max_retries,
+            "exc_name": self.exc_name,
+            "exc_message": self.exc_message,
             "exc_info": self.exc_info,
             "company_id": self.company_id,
             "result": str(self.result) if self.result else False,
             "date_enqueued": False,
             "date_started": False,
             "date_done": False,
+            "exec_time": False,
             "eta": False,
             "identity_key": False,
             "worker_pid": self.worker_pid,
@@ -546,40 +569,59 @@ def store(self):
             vals["date_started"] = self.date_started
         if self.date_done:
             vals["date_done"] = self.date_done
+        if self.exec_time:
+            vals["exec_time"] = self.exec_time
         if self.eta:
             vals["eta"] = self.eta
         if self.identity_key:
             vals["identity_key"] = self.identity_key
 
-        job_model = self.env["queue.job"]
-        # The sentinel is used to prevent edition sensitive fields (such as
-        # method_name) from RPC methods.
-        edit_sentinel = job_model.EDIT_SENTINEL
-
-        db_record = self.db_record()
-        if db_record:
-            db_record.with_context(_job_edit_sentinel=edit_sentinel).write(vals)
-        else:
-            date_created = self.date_created
-            # The following values must never be modified after the
-            # creation of the job
+        if create:
             vals.update(
                 {
+                    "user_id": self.env.uid,
+                    "channel": self.channel,
+                    # The following values must never be modified after the
+                    # creation of the job
                     "uuid": self.uuid,
                     "name": self.description,
-                    "date_created": date_created,
+                    "func_string": self.func_string,
+                    "date_created": self.date_created,
+                    "model_name": self.recordset._name,
                     "method_name": self.method_name,
+                    "job_function_id": self.job_config.job_function_id,
+                    "channel_method_name": self.job_function_name,
                     "records": self.recordset,
                     "args": self.args,
                     "kwargs": self.kwargs,
                 }
             )
 
-            # it the channel is not specified, lets the job_model compute
-            # the right one to use
-            if self.channel:
-                vals.update({"channel": self.channel})
-            job_model.with_context(_job_edit_sentinel=edit_sentinel).sudo().create(vals)
+        vals_from_model = self._store_values_from_model()
+        # Sanitize values: make sure the model hooks cannot override core values
+        vals_from_model = {k: v for k, v in vals_from_model.items() if k not in vals}
+        vals.update(vals_from_model)
+        return vals
+
+    def _store_values_from_model(self):
+        vals = {}
+        value_handlers_candidates = (
+            "_job_store_values_for_" + self.method_name,
+            "_job_store_values",
+        )
+        for candidate in value_handlers_candidates:
+            handler = getattr(self.recordset, candidate, None)
+            if handler is not None:
+                vals = handler(self)
+                # stop at the most specific handler found
+                break
+        return vals
+
+    @property
+    def func_string(self):
+        model = repr(self.recordset)
+        args = [repr(arg) for arg in self.args]
+        kwargs = ["{}={!r}".format(key, val) for key, val in self.kwargs.items()]
+        all_args = ", ".join(args + kwargs)
+        return "{}.{}({})".format(model, self.method_name, all_args)
 
     def db_record(self):
         return self.db_record_from_uuid(self.env, self.uuid)
@@ -595,6 +637,11 @@ def func(self):
             recordset = recordset.with_company(self.company_id)
         return getattr(recordset, self.method_name)
 
+    @property
+    def job_function_name(self):
+        func_model = self.env["queue.job.function"].sudo()
+        return func_model.job_function_name(self.recordset._name, self.method_name)
+
     @property
     def identity_key(self):
         if self._identity_key is None:
@@ -652,10 +699,25 @@ def eta(self, value):
         else:
             self._eta = value
 
+    @property
+    def channel(self):
+        return self._channel or self.job_config.channel
+
+    @channel.setter
+    def channel(self, value):
+        self._channel = value
+
+    @property
+    def exec_time(self):
+        if self.date_done and self.date_started:
+            return (self.date_done - self.date_started).total_seconds()
+        return None
+
     def set_pending(self, result=None, reset_retry=True):
         self.state = PENDING
         self.date_enqueued = None
         self.date_started = None
+        self.date_done = None
         self.worker_pid = None
         if reset_retry:
             self.retry = 0
@@ -675,15 +737,17 @@ def set_started(self):
 
     def set_done(self, result=None):
         self.state = DONE
+        self.exc_name = None
         self.exc_info = None
         self.date_done = datetime.now()
         if result is not None:
             self.result = result
 
-    def set_failed(self, exc_info=None):
+    def set_failed(self, **kw):
         self.state = FAILED
-        if exc_info is not None:
-            self.exc_info = exc_info
+        for k, v in kw.items():
+            if v is not None:
+                setattr(self, k, v)
 
     def __repr__(self):
         return "<Job %s, priority:%d>" % (self.uuid, self.priority)
@@ -714,6 +778,7 @@ def postpone(self, result=None, seconds=None):
         """
         eta_seconds = self._get_retry_seconds(seconds)
         self.eta = timedelta(seconds=eta_seconds)
+        self.exc_name = None
         self.exc_info = None
         if result is not None:
             self.result = result
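To make the new `Job` attributes concrete, a small sketch (assuming an Odoo environment `env` with the `test_queue_job` addon installed; `testing_method` comes from that addon):

```python
# Sketch: the new Job properties added in job.py above.
from datetime import datetime

from odoo.addons.queue_job.job import Job

job = Job(env["test.queue.job"].testing_method)
job.date_started = datetime(2021, 1, 1, 12, 0, 0)
job.date_done = datetime(2021, 1, 1, 12, 1, 30)

job.exec_time          # 90.0, i.e. (date_done - date_started) in seconds
job.channel            # falls back to job_config.channel when not set explicitly
job.func_string        # "test.queue.job().testing_method()"
job.job_function_name  # "<test.queue.job>.testing_method"
```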
diff --git a/queue_job/migrations/14.0.1.4.0/post-migration.py b/queue_job/migrations/14.0.1.4.0/post-migration.py
new file mode 100644
index 0000000000..f6eff72707
--- /dev/null
+++ b/queue_job/migrations/14.0.1.4.0/post-migration.py
@@ -0,0 +1,47 @@
+# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html)
+
+import logging
+
+from odoo import SUPERUSER_ID, api
+
+_logger = logging.getLogger(__name__)
+
+
+def migrate(cr, version):
+    with api.Environment.manage():
+        env = api.Environment(cr, SUPERUSER_ID, {})
+        _logger.info("Computing exception names for failed jobs")
+        _compute_jobs_new_values(env)
+
+
+def _compute_jobs_new_values(env):
+    for job in env["queue.job"].search(
+        [("state", "=", "failed"), ("exc_info", "!=", False)]
+    ):
+        exception_details = _get_exception_details(job)
+        if exception_details:
+            job.update(exception_details)
+
+
+def _get_exception_details(job):
+    for line in reversed(job.exc_info.splitlines()):
+        if _find_exception(line):
+            name, msg = line.split(":", 1)
+            return {
+                "exc_name": name.strip(),
+                "exc_message": msg.strip("()', \""),
+            }
+
+
+def _find_exception(line):
+    # Just a list of common errors.
+    # If you need to target others, add your own migration step for your db.
+    exceptions = (
+        "Error:",  # catches all well-named exception classes
+        # other errors found on live instances
+        "requests.exceptions.MissingSchema",
+        "botocore.errorfactory.NoSuchKey",
+    )
+    for exc in exceptions:
+        if exc in line:
+            return exc
diff --git a/queue_job/migrations/14.0.1.4.0/pre-migration.py b/queue_job/migrations/14.0.1.4.0/pre-migration.py
new file mode 100644
index 0000000000..8ae6cb3a5f
--- /dev/null
+++ b/queue_job/migrations/14.0.1.4.0/pre-migration.py
@@ -0,0 +1,33 @@
+# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html)
+
+from odoo.tools.sql import column_exists, table_exists
+
+
+def migrate(cr, version):
+    if table_exists(cr, "queue_job") and not column_exists(
+        cr, "queue_job", "exec_time"
+    ):
+        # Disable the trigger first, otherwise the bulk update takes ages.
+        cr.execute(
+            """
+            ALTER TABLE queue_job DISABLE TRIGGER queue_job_notify;
+            """
+        )
+        cr.execute(
+            """
+            ALTER TABLE queue_job ADD COLUMN exec_time double precision DEFAULT 0;
+            """
+        )
+        cr.execute(
+            """
+            UPDATE queue_job
+            SET exec_time = EXTRACT(EPOCH FROM (date_done - date_started));
+            """
+        )
+        cr.execute(
+            """
+            ALTER TABLE queue_job ENABLE TRIGGER queue_job_notify;
+            """
+        )
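To illustrate what the post-migration parser extracts, here is a standalone run of the same logic on an invented traceback:

```python
# Standalone sketch of _get_exception_details/_find_exception; the
# traceback text below is invented for the example.
SAMPLE_EXC_INFO = """Traceback (most recent call last):
  File "/odoo/addons/some_module/models/thing.py", line 42, in process
    raise ValueError("boom")
ValueError: boom"""

for line in reversed(SAMPLE_EXC_INFO.splitlines()):
    if "Error:" in line:  # same "well-named exception" heuristic as above
        name, msg = line.split(":", 1)
        print({"exc_name": name.strip(), "exc_message": msg.strip("()', \"")})
        # -> {'exc_name': 'ValueError', 'exc_message': 'boom'}
        break
```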
+ """ + return {} diff --git a/queue_job/models/queue_job.py b/queue_job/models/queue_job.py index 157e4d8cbc..2d8ef74538 100644 --- a/queue_job/models/queue_job.py +++ b/queue_job/models/queue_job.py @@ -37,27 +37,22 @@ class QueueJob(models.Model): "date_created", "model_name", "method_name", + "func_string", + "channel_method_name", + "job_function_id", "records", "args", "kwargs", ) uuid = fields.Char(string="UUID", readonly=True, index=True, required=True) - user_id = fields.Many2one( - comodel_name="res.users", - string="User ID", - compute="_compute_user_id", - inverse="_inverse_user_id", - store=True, - ) + user_id = fields.Many2one(comodel_name="res.users", string="User ID") company_id = fields.Many2one( comodel_name="res.company", string="Company", index=True ) name = fields.Char(string="Description", readonly=True) - model_name = fields.Char( - string="Model", compute="_compute_model_name", store=True, readonly=True - ) + model_name = fields.Char(string="Model", readonly=True) method_name = fields.Char(readonly=True) # record_ids field is only for backward compatibility (e.g. used in related # actions), can be removed (replaced by "records") in 14.0 @@ -69,12 +64,12 @@ class QueueJob(models.Model): ) args = JobSerialized(readonly=True, base_type=tuple) kwargs = JobSerialized(readonly=True, base_type=dict) - func_string = fields.Char( - string="Task", compute="_compute_func_string", readonly=True, store=True - ) + func_string = fields.Char(string="Task", readonly=True) state = fields.Selection(STATES, readonly=True, required=True, index=True) priority = fields.Integer() + exc_name = fields.Char(string="Exception", readonly=True) + exc_message = fields.Char(string="Exception Message", readonly=True) exc_info = fields.Text(string="Exception Info", readonly=True) result = fields.Text(readonly=True) @@ -82,6 +77,11 @@ class QueueJob(models.Model): date_started = fields.Datetime(string="Start Date", readonly=True) date_enqueued = fields.Datetime(string="Enqueue Time", readonly=True) date_done = fields.Datetime(readonly=True) + exec_time = fields.Float( + string="Execution Time (avg)", + group_operator="avg", + help="Time required to execute this job in seconds. Average when grouped.", + ) eta = fields.Datetime(string="Execute only after") retry = fields.Integer(string="Current try") @@ -91,24 +91,18 @@ class QueueJob(models.Model): "max. 
diff --git a/queue_job/models/queue_job.py b/queue_job/models/queue_job.py
index 157e4d8cbc..2d8ef74538 100644
--- a/queue_job/models/queue_job.py
+++ b/queue_job/models/queue_job.py
@@ -37,27 +37,22 @@ class QueueJob(models.Model):
         "date_created",
         "model_name",
         "method_name",
+        "func_string",
+        "channel_method_name",
+        "job_function_id",
         "records",
         "args",
         "kwargs",
     )
 
     uuid = fields.Char(string="UUID", readonly=True, index=True, required=True)
-    user_id = fields.Many2one(
-        comodel_name="res.users",
-        string="User ID",
-        compute="_compute_user_id",
-        inverse="_inverse_user_id",
-        store=True,
-    )
+    user_id = fields.Many2one(comodel_name="res.users", string="User ID")
     company_id = fields.Many2one(
         comodel_name="res.company", string="Company", index=True
     )
     name = fields.Char(string="Description", readonly=True)
-    model_name = fields.Char(
-        string="Model", compute="_compute_model_name", store=True, readonly=True
-    )
+    model_name = fields.Char(string="Model", readonly=True)
     method_name = fields.Char(readonly=True)
     # record_ids field is only for backward compatibility (e.g. used in related
     # actions), can be removed (replaced by "records") in 14.0
@@ -69,12 +64,12 @@ class QueueJob(models.Model):
     )
     args = JobSerialized(readonly=True, base_type=tuple)
     kwargs = JobSerialized(readonly=True, base_type=dict)
-    func_string = fields.Char(
-        string="Task", compute="_compute_func_string", readonly=True, store=True
-    )
+    func_string = fields.Char(string="Task", readonly=True)
 
     state = fields.Selection(STATES, readonly=True, required=True, index=True)
     priority = fields.Integer()
+    exc_name = fields.Char(string="Exception", readonly=True)
+    exc_message = fields.Char(string="Exception Message", readonly=True)
     exc_info = fields.Text(string="Exception Info", readonly=True)
     result = fields.Text(readonly=True)
 
@@ -82,6 +77,11 @@ class QueueJob(models.Model):
     date_started = fields.Datetime(string="Start Date", readonly=True)
     date_enqueued = fields.Datetime(string="Enqueue Time", readonly=True)
     date_done = fields.Datetime(readonly=True)
+    exec_time = fields.Float(
+        string="Execution Time (avg)",
+        group_operator="avg",
+        help="Time required to execute this job in seconds. Average when grouped.",
+    )
 
     eta = fields.Datetime(string="Execute only after")
     retry = fields.Integer(string="Current try")
@@ -91,24 +91,18 @@ class QueueJob(models.Model):
         "max. retries.\n"
         "Retries are infinite when empty.",
     )
-    channel_method_name = fields.Char(
-        readonly=True, compute="_compute_job_function", store=True
-    )
+    # FIXME: the name of this field is misleading; it stores the job function
+    # name, not a channel name
+    channel_method_name = fields.Char(readonly=True)
     job_function_id = fields.Many2one(
         comodel_name="queue.job.function",
-        compute="_compute_job_function",
         string="Job Function",
         readonly=True,
-        store=True,
     )
-    override_channel = fields.Char()
-    channel = fields.Char(
-        compute="_compute_channel", inverse="_inverse_channel", store=True, index=True
-    )
+    channel = fields.Char(index=True)
 
-    identity_key = fields.Char()
-    worker_pid = fields.Integer()
+    identity_key = fields.Char(readonly=True)
+    worker_pid = fields.Integer(readonly=True)
 
     def init(self):
         self._cr.execute(
@@ -122,67 +116,23 @@ def init(self):
             "'enqueued') AND identity_key IS NOT NULL;"
         )
 
-    @api.depends("records")
-    def _compute_user_id(self):
-        for record in self:
-            record.user_id = record.records.env.uid
-
-    def _inverse_user_id(self):
-        for record in self.with_context(_job_edit_sentinel=self.EDIT_SENTINEL):
-            record.records = record.records.with_user(record.user_id.id)
-
-    @api.depends("records")
-    def _compute_model_name(self):
-        for record in self:
-            record.model_name = record.records._name
-
     @api.depends("records")
     def _compute_record_ids(self):
         for record in self:
             record.record_ids = record.records.ids
 
-    def _inverse_channel(self):
-        for record in self:
-            record.override_channel = record.channel
-
-    @api.depends("job_function_id.channel_id")
-    def _compute_channel(self):
-        for record in self:
-            channel = (
-                record.override_channel or record.job_function_id.channel or "root"
-            )
-            if record.channel != channel:
-                record.channel = channel
-
-    @api.depends("model_name", "method_name", "job_function_id.channel_id")
-    def _compute_job_function(self):
-        for record in self:
-            func_model = self.env["queue.job.function"]
-            channel_method_name = func_model.job_function_name(
-                record.model_name, record.method_name
-            )
-            function = func_model.search([("name", "=", channel_method_name)], limit=1)
-            record.channel_method_name = channel_method_name
-            record.job_function_id = function
-
-    @api.depends("model_name", "method_name", "records", "args", "kwargs")
-    def _compute_func_string(self):
-        for record in self:
-            model = repr(record.records)
-            args = [repr(arg) for arg in record.args]
-            kwargs = ["{}={!r}".format(key, val) for key, val in record.kwargs.items()]
-            all_args = ", ".join(args + kwargs)
-            record.func_string = "{}.{}({})".format(model, record.method_name, all_args)
-
     @api.model_create_multi
     def create(self, vals_list):
         if self.env.context.get("_job_edit_sentinel") is not self.EDIT_SENTINEL:
             # Prevent creating a queue.job record "raw" from RPC;
             # ``with_delay()`` must be used.
             raise exceptions.AccessError(
-                _("Queue jobs must created by calling 'with_delay()'.")
+                _("Queue jobs must be created by calling 'with_delay()'.")
             )
-        return super().create(vals_list)
+        return super(
+            QueueJob,
+            self.with_context(mail_create_nolog=True, mail_create_nosubscribe=True),
+        ).create(vals_list)
 
     def write(self, vals):
         if self.env.context.get("_job_edit_sentinel") is not self.EDIT_SENTINEL:
@@ -196,10 +146,25 @@ def write(self, vals):
                 )
             )
 
+        different_user_jobs = self.browse()
+        if vals.get("user_id"):
+            different_user_jobs = self.filtered(
+                lambda records: records.env.user.id != vals["user_id"]
+            )
+
         if vals.get("state") == "failed":
             self._message_post_on_failure()
 
-        return super().write(vals)
+        result = super().write(vals)
+
+        for record in different_user_jobs:
+            # the user is stored in the env of the "records" recordset, but we
+            # also keep a stored user_id field for searching and grouping, so
+            # synchronize the env of the records with user_id
+            super(QueueJob, record).write(
+                {"records": record.records.with_user(vals["user_id"])}
+            )
+        return result
 
     def open_related_action(self):
         """Open the related action associated to the job"""
@@ -239,9 +204,10 @@ def _message_post_on_failure(self):
         # subscribe the users now to avoid subscribing them
         # at every job creation
         domain = self._subscribe_users_domain()
-        users = self.env["res.users"].search(domain)
-        self.message_subscribe(partner_ids=users.mapped("partner_id").ids)
+        base_users = self.env["res.users"].search(domain)
         for record in self:
+            users = base_users | record.user_id
+            record.message_subscribe(partner_ids=users.mapped("partner_id").ids)
             msg = record._message_failed_job()
             if msg:
                 record.message_post(body=msg, subtype_xmlid="queue_job.mt_job_failed")
diff --git a/queue_job/models/queue_job_function.py b/queue_job/models/queue_job_function.py
index a80f4920d3..f593b6520a 100644
--- a/queue_job/models/queue_job_function.py
+++ b/queue_job/models/queue_job_function.py
@@ -27,7 +27,8 @@ class QueueJobFunction(models.Model):
         "retry_pattern "
         "related_action_enable "
         "related_action_func_name "
-        "related_action_kwargs ",
+        "related_action_kwargs "
+        "job_function_id ",
     )
 
     def _default_channel(self):
@@ -141,6 +142,7 @@ def job_default_config(self):
             related_action_enable=True,
             related_action_func_name=None,
             related_action_kwargs={},
+            job_function_id=None,
         )
 
     def _parse_retry_pattern(self):
@@ -173,6 +175,7 @@ def job_config(self, name):
             related_action_enable=config.related_action.get("enable", True),
             related_action_func_name=config.related_action.get("func_name"),
             related_action_kwargs=config.related_action.get("kwargs", {}),
+            job_function_id=config.id,
         )
 
     def _retry_pattern_format_error_message(self):
diff --git a/queue_job/tests/test_model_job_function.py b/queue_job/tests/test_model_job_function.py
index c9bdea56e8..e6ddf3fcc3 100644
--- a/queue_job/tests/test_model_job_function.py
+++ b/queue_job/tests/test_model_job_function.py
@@ -31,7 +31,7 @@ def test_function_job_config(self):
         channel = self.env["queue.job.channel"].create(
             {"name": "foo", "parent_id": self.env.ref("queue_job.channel_root").id}
         )
-        self.env["queue.job.function"].create(
+        job_function = self.env["queue.job.function"].create(
             {
                 "model_id": self.env.ref("base.model_res_users").id,
                 "method": "read",
@@ -52,5 +52,6 @@ def test_function_job_config(self):
                 related_action_enable=True,
                 related_action_func_name="related_action_foo",
                 related_action_kwargs={"b": 1},
+                job_function_id=job_function.id,
             ),
         )
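With `job_function_id` now part of the `JobConfig` tuple, resolving a job's configuration links back to the `queue.job.function` record. A usage sketch (model and method purely illustrative; `env` assumed):

```python
# Sketch: how Job resolves its configuration via queue.job.function.
func_model = env["queue.job.function"].sudo()
name = func_model.job_function_name("res.users", "read")  # "<res.users>.read"
config = func_model.job_config(name)
config.channel          # configured channel, e.g. "root.foo"
config.job_function_id  # id of the queue.job.function record (new in this patch)
```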
diff --git a/queue_job/views/queue_job_views.xml b/queue_job/views/queue_job_views.xml
index a68c03d34f..cb7aeac28b 100644
--- a/queue_job/views/queue_job_views.xml
+++ b/queue_job/views/queue_job_views.xml
[The XML markup of this file's hunks was lost in extraction; only text content
survives. Recoverable changes: the form view gains the new exception fields
(hunks @@ -57,6 +57,8 @@ and @@ -69,6 +71,18 @@, the latter next to the
"If the max. retries is 0, the number of retries is infinite." help text);
the tree view gains matching columns (hunk @@ -106,9 +113,12 @@); new pivot
and graph views named "queue.job.pivot" and "queue.job.graph" are added for
model "queue.job" (hunk @@ -116,6 +126,29 @@); the search view
"queue.job.search" is extended (hunk @@ -126,6 +159,11 @@). The action
change survives:]
@@ -176,7 +229,7 @@
             <field name="name">Jobs</field>
             <field name="res_model">queue.job</field>
-            <field name="view_mode">tree,form</field>
+            <field name="view_mode">tree,form,pivot,graph</field>
             <field name="context">{'search_default_pending': 1,
                                    'search_default_enqueued': 1,
                                    'search_default_started': 1,
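For reference, pivot and graph definitions consistent with the record names above would look roughly like this; the arch content (row and measure fields) is an assumption, since the markup was lost:

```xml
<!-- Hypothetical reconstruction; only the view names and model are attested. -->
<record id="view_queue_job_pivot" model="ir.ui.view">
    <field name="name">queue.job.pivot</field>
    <field name="model">queue.job</field>
    <field name="arch" type="xml">
        <pivot>
            <field name="channel" type="row" />
            <field name="exec_time" type="measure" />
        </pivot>
    </field>
</record>
<record id="view_queue_job_graph" model="ir.ui.view">
    <field name="name">queue.job.graph</field>
    <field name="model">queue.job</field>
    <field name="arch" type="xml">
        <graph>
            <field name="channel" type="row" />
            <field name="exec_time" type="measure" />
        </graph>
    </field>
</record>
```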
diff --git a/test_queue_job/models/test_models.py b/test_queue_job/models/test_models.py
index a5a3843230..a9f32dec0e 100644
--- a/test_queue_job/models/test_models.py
+++ b/test_queue_job/models/test_models.py
@@ -10,6 +10,8 @@ class QueueJob(models.Model):
 
     _inherit = "queue.job"
 
+    additional_info = fields.Char()
+
     def testing_related_method(self, **kwargs):
         return self, kwargs
 
@@ -88,6 +90,12 @@ def _register_hook(self):
             )
         return super()._register_hook()
 
+    def _job_store_values(self, job):
+        value = "JUST_TESTING"
+        if job.state == "failed":
+            value += "_BUT_FAILED"
+        return {"additional_info": value}
+
 
 class TestQueueChannel(models.Model):
 
diff --git a/test_queue_job/tests/test_job.py b/test_queue_job/tests/test_job.py
index 3cbb034acb..c78c551f2a 100644
--- a/test_queue_job/tests/test_job.py
+++ b/test_queue_job/tests/test_job.py
@@ -7,7 +7,6 @@
 import mock
 
 import odoo.tests.common as common
-from odoo import SUPERUSER_ID
 
 from odoo.addons.queue_job import identity_exact
 from odoo.addons.queue_job.exception import (
@@ -150,21 +149,29 @@ def test_worker_pid(self):
 
     def test_set_done(self):
         job_a = Job(self.method)
+        job_a.date_started = datetime(2015, 3, 15, 16, 40, 0)
         datetime_path = "odoo.addons.queue_job.job.datetime"
         with mock.patch(datetime_path, autospec=True) as mock_datetime:
             mock_datetime.now.return_value = datetime(2015, 3, 15, 16, 41, 0)
             job_a.set_done(result="test")
             self.assertEqual(job_a.state, DONE)
             self.assertEqual(job_a.result, "test")
             self.assertEqual(job_a.date_done, datetime(2015, 3, 15, 16, 41, 0))
+            self.assertEqual(job_a.exec_time, 60.0)
             self.assertFalse(job_a.exc_info)
 
     def test_set_failed(self):
         job_a = Job(self.method)
-        job_a.set_failed(exc_info="failed test")
+        job_a.set_failed(
+            exc_info="failed test",
+            exc_name="FailedTest",
+            exc_message="Sadly this job failed",
+        )
         self.assertEqual(job_a.state, FAILED)
         self.assertEqual(job_a.exc_info, "failed test")
+        self.assertEqual(job_a.exc_name, "FailedTest")
+        self.assertEqual(job_a.exc_message, "Sadly this job failed")
 
     def test_postpone(self):
         job_a = Job(self.method)
@@ -226,6 +233,16 @@ def test_store(self):
         stored = self.queue_job.search([("uuid", "=", test_job.uuid)])
         self.assertEqual(len(stored), 1)
 
+    def test_store_extra_data(self):
+        test_job = Job(self.method)
+        test_job.store()
+        stored = self.queue_job.search([("uuid", "=", test_job.uuid)])
+        self.assertEqual(stored.additional_info, "JUST_TESTING")
+        test_job.set_failed(exc_info="failed test", exc_name="FailedTest")
+        test_job.store()
+        stored.invalidate_cache()
+        self.assertEqual(stored.additional_info, "JUST_TESTING_BUT_FAILED")
+
     def test_read(self):
         eta = datetime.now() + timedelta(hours=5)
         test_job = Job(
@@ -277,6 +294,7 @@ def test_read(self):
         self.assertAlmostEqual(job_read.date_started, test_date, delta=delta)
         self.assertAlmostEqual(job_read.date_enqueued, test_date, delta=delta)
         self.assertAlmostEqual(job_read.date_done, test_date, delta=delta)
+        self.assertAlmostEqual(job_read.exec_time, 0.0)
 
     def test_job_unlinked(self):
         test_job = Job(self.method, args=("o", "k"), kwargs={"c": "!"})
@@ -524,7 +542,7 @@ def test_message_when_write_fail(self):
         stored.write({"state": "failed"})
         self.assertEqual(stored.state, FAILED)
         messages = stored.message_ids
-        self.assertEqual(len(messages), 2)
+        self.assertEqual(len(messages), 1)
 
     def test_follower_when_write_fail(self):
         """Check that inactive users are not followers even if
@@ -583,6 +601,22 @@ def setUp(self):
         User = self.env["res.users"]
         Company = self.env["res.company"]
         Partner = self.env["res.partner"]
+
+        main_company = self.env.ref("base.main_company")
+
+        self.partner_user = Partner.create(
+            {"name": "Simple User", "email": "simple.user@example.com"}
+        )
+        self.simple_user = User.create(
+            {
+                "partner_id": self.partner_user.id,
+                "company_ids": [(4, main_company.id)],
+                "login": "simple_user",
+                "name": "simple user",
+                "groups_id": [],
+            }
+        )
+
         self.other_partner_a = Partner.create(
             {"name": "My Company a", "is_company": True, "email": "test@tes.ttest"}
         )
@@ -599,7 +633,7 @@ def setUp(self):
                 "company_id": self.other_company_a.id,
                 "company_ids": [(4, self.other_company_a.id)],
                 "login": "my_login a",
-                "name": "my user",
+                "name": "my user A",
                 "groups_id": [(4, grp_queue_job_manager)],
             }
         )
@@ -619,16 +653,11 @@ def setUp(self):
                 "company_id": self.other_company_b.id,
                 "company_ids": [(4, self.other_company_b.id)],
                 "login": "my_login_b",
-                "name": "my user 1",
+                "name": "my user B",
                 "groups_id": [(4, grp_queue_job_manager)],
             }
         )
 
-    def _subscribe_users(self, stored):
-        domain = stored._subscribe_users_domain()
-        users = self.env["res.users"].search(domain)
-        stored.message_subscribe(partner_ids=users.mapped("partner_id").ids)
-
     def _create_job(self, env):
         self.cr.execute("delete from queue_job")
         env["test.queue.job"].with_delay().testing_method()
@@ -674,11 +703,14 @@ def test_job_subscription(self):
         # queue_job.group_queue_job_manager must be followers
         User = self.env["res.users"]
         no_company_context = dict(self.env.context, company_id=None)
-        no_company_env = self.env(context=no_company_context)
+        no_company_env = self.env(user=self.simple_user, context=no_company_context)
         stored = self._create_job(no_company_env)
-        self._subscribe_users(stored)
-        users = User.with_context(active_test=False).search(
-            [("groups_id", "=", self.ref("queue_job.group_queue_job_manager"))]
+        stored._message_post_on_failure()
+        users = (
+            User.search(
+                [("groups_id", "=", self.ref("queue_job.group_queue_job_manager"))]
+            )
+            + stored.user_id
         )
         self.assertEqual(len(stored.message_follower_ids), len(users))
         expected_partners = [u.partner_id for u in users]
@@ -692,13 +724,13 @@ def test_job_subscription(self):
         # jobs created for a specific company_id are followed only by
         # the company's members
         company_a_context = dict(self.env.context, company_id=self.other_company_a.id)
-        company_a_env = self.env(context=company_a_context)
+        company_a_env = self.env(user=self.simple_user, context=company_a_context)
         stored = self._create_job(company_a_env)
         stored.with_user(self.other_user_a.id)
-        self._subscribe_users(stored)
-        # 2 because admin + self.other_partner_a
+        stored._message_post_on_failure()
+        # 2 because simple_user (the job's creator) + self.other_partner_a
         self.assertEqual(len(stored.message_follower_ids), 2)
-        users = User.browse([SUPERUSER_ID, self.other_user_a.id])
+        users = self.simple_user + self.other_user_a
         expected_partners = [u.partner_id for u in users]
         self.assertSetEqual(
             set(stored.message_follower_ids.mapped("partner_id")),
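Since `exec_time` is stored with `group_operator="avg"`, aggregated timings can be read directly through the ORM; a usage sketch (standard `read_group` call, the domain and grouping are illustrative):

```python
# Sketch: average execution time per channel for completed jobs.
rows = env["queue.job"].read_group(
    domain=[("state", "=", "done")],
    fields=["exec_time"],
    groupby=["channel"],
)
for row in rows:
    print(row["channel"], row["exec_time"])  # mean seconds, per group_operator
```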