diff --git a/.codeclimate.yml b/.codeclimate.yml
new file mode 100644
index 0000000000..101d522c3e
--- /dev/null
+++ b/.codeclimate.yml
@@ -0,0 +1,8 @@
+languages:
+  JavaScript: true
+  Python: true
+exclude_paths:
+- "__unported__/*"
+- "*/__openerp__.py" # because this is fed to eval
+- "*/__manifest__.py"
+- "*/migrations/*" # without OpenUpgrade, repetitions can be necessary
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000..f7f8a408be
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,59 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+env/
+bin/
+build/
+develop-eggs/
+dist/
+eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+*.egg-info/
+.installed.cfg
+*.egg
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.coverage
+.cache
+nosetests.xml
+coverage.xml
+
+# Translations
+*.mo
+
+# Pycharm
+.idea
+
+# Mr Developer
+.mr.developer.cfg
+.project
+.pydevproject
+
+# Rope
+.ropeproject
+
+# Sphinx documentation
+docs/_build/
+
+# Backup files
+*~
+*.swp
+
+# OSX Files
+*.DS_Store
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000000..a44f8295dc
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,72 @@
+language: python
+sudo: false
+cache:
+  apt: true
+  directories:
+    - $HOME/.cache/pip
+
+python:
+ - "2.7"
+
+addons:
+  apt:
+# only add the two lines below if you need wkhtmltopdf for your tests
+#    sources:
+#      - pov-wkhtmltopdf
+# Search your sources alias here:
+#   https://github.com/travis-ci/apt-source-whitelist/blob/master/ubuntu.json
+    packages:
+      - expect-dev  # provides unbuffer utility
+      - python-lxml  # because pip installation is slow
+      - python-simplejson
+      - python-serial
+      - python-yaml
+# Search your packages here:
+#   https://github.com/travis-ci/apt-package-whitelist/blob/master/ubuntu-precise
+#      - wkhtmltopdf  # only add if needed and check the before_install section below
+
+# set up an X server to run wkhtmltopdf.
+#before_install:
+# - "export DISPLAY=:99.0"
+# - "sh -e /etc/init.d/xvfb start"
+
+env:
+  global:
+  - VERSION="10.0" TESTS="0" LINT_CHECK="0" TRANSIFEX="0"
+  - TRANSIFEX_USER='transbot@odoo-community.org'
+  # This line contains the encrypted transifex password
+  # To encrypt the transifex password, install the travis ruby utils with:
+  #   $ gem install travis --user-install
+  # and use:
+  #   $ travis encrypt TRANSIFEX_PASSWORD=your-password -r owner/project
+  # Secure list for current OCA projects is in https://github.com/OCA/maintainer-quality-tools/issues/194
+  # - secure: PjP88tPSwimBv4tsgn3UcQAD1heK/wcuSaSfhi2xUt/jSrOaTmWzjaW2gH/eKM1ilxPXwlPGyAIShJ2JJdBiA97hQufOeiqxdkWDctnNVVEDx2Tk0BiG3PPYyhXPgUZ+FNOnjZFF3pNWvzXTQaB0Nvz8plqp93Ov/DEyhrCxHDs=
+  # Use the following lines if you need to manually change the transifex project slug and/or the transifex organization.
+  # The default project slug is owner-repo_name-version (with a dash in the version string).
+  # The default organization is the owner of the repo.
+  # The default fill up resources (TM) is True.
+  # The default team is 23907. https://www.transifex.com/organization/oca/team/23907/
+  # - TRANSIFEX_PROJECT_SLUG=
+  # - TRANSIFEX_ORGANIZATION=
+  # - TRANSIFEX_FILL_UP_RESOURCES=
+  # - TRANSIFEX_TEAM=
+
+  matrix:
+  - LINT_CHECK="1"
+  - TRANSIFEX="0"  # need a user
+  - TESTS="1" ODOO_REPO="odoo/odoo"
+  - TESTS="1" ODOO_REPO="OCA/OCB"
+
+virtualenv:
+  system_site_packages: true
+
+install:
+  - git clone --depth=1 https://github.com/OCA/maintainer-quality-tools.git ${HOME}/maintainer-quality-tools
+  - export PATH=${HOME}/maintainer-quality-tools/travis:${PATH}
+  - travis_install_nightly
+
+script:
+  - travis_run_tests
+
+after_success:
+  - travis_after_tests_success
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000000..92d051bc9e
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,7 @@
+# OCA Guidelines
+
+Please follow the official guide from the [OCA Guidelines page](https://github.com/OCA/maintainer-tools/blob/master/CONTRIBUTING.md).
+
+## Project Specific Guidelines
+
+This project does not have specific coding guidelines.
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000000..58777e31af
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,661 @@
+GNU AFFERO GENERAL PUBLIC LICENSE
+ Version 3, 19 November 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU Affero General Public License is a free, copyleft license for
+software and other kinds of works, specifically designed to ensure
+cooperation with the community in the case of network server software.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+our General Public Licenses are intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ Developers that use our General Public Licenses protect your rights
+with two steps: (1) assert copyright on the software, and (2) offer
+you this License which gives you legal permission to copy, distribute
+and/or modify the software.
+
+ A secondary benefit of defending all users' freedom is that
+improvements made in alternate versions of the program, if they
+receive widespread use, become available for other developers to
+incorporate. Many developers of free software are heartened and
+encouraged by the resulting cooperation. However, in the case of
+software used on network servers, this result may fail to come about.
+The GNU General Public License permits making a modified version and
+letting the public access it on a server without ever releasing its
+source code to the public.
+
+ The GNU Affero General Public License is designed specifically to
+ensure that, in such cases, the modified source code becomes available
+to the community. It requires the operator of a network server to
+provide the source code of the modified version running there to the
+users of that server. Therefore, public use of a modified version, on
+a publicly accessible server, gives the public access to the source
+code of the modified version.
+
+ An older license, called the Affero General Public License and
+published by Affero, was designed to accomplish similar goals. This is
+a different license, not a version of the Affero GPL, but Affero has
+released a new version of the Affero GPL which permits relicensing under
+this license.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU Affero General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Remote Network Interaction; Use with the GNU General Public License.
+
+ Notwithstanding any other provision of this License, if you modify the
+Program, your modified version must prominently offer all users
+interacting with it remotely through a computer network (if your version
+supports such interaction) an opportunity to receive the Corresponding
+Source of your version by providing access to the Corresponding Source
+from a network server at no charge, through some standard or customary
+means of facilitating copying of software. This Corresponding Source
+shall include the Corresponding Source for any work covered by version 3
+of the GNU General Public License that is incorporated pursuant to the
+following paragraph.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the work with which it is combined will remain governed by version
+3 of the GNU General Public License.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU Affero General Public License from time to time. Such new versions
+will be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU Affero General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU Affero General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU Affero General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU Affero General Public License as published
+    by the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU Affero General Public License for more details.
+
+    You should have received a copy of the GNU Affero General Public License
+    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source. For example, if your program is a web application, its
+interface could display a "Source" link that leads users to an archive
+of the code. There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU AGPL, see
+<http://www.gnu.org/licenses/>.
diff --git a/README.md b/README.md
index 990df9333d..f964761c40 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,6 @@
[![Build Status](https://travis-ci.org/OCA/queue.svg?branch=10.0)](https://travis-ci.org/OCA/queue)
-[![Coverage Status](https://coveralls.io/repos/OCA/queue/badge.svg?branch=10.0)](https://coveralls.io/r/OCA/queue?branch=10.0)
+[![codecov](https://codecov.io/gh/OCA/queue/branch/10.0/graph/badge.svg)](https://codecov.io/gh/OCA/queue)
+
Odoo Queue Modules
==================
@@ -8,3 +9,13 @@ Asynchronous Job Queue. Delay Model methods in asynchronous jobs, executed in
the background as soon as possible or on a schedule. Support Channels to
segregate jobs in different queues with different capacities. Unlike
scheduled tasks, a job captures arguments for later processing.
+
+
+[//]: # (addons)
+This part will be replaced when running the oca-gen-addons-table script from OCA/maintainer-tools.
+[//]: # (end addons)
+
+Translation Status
+------------------
+[![Transifex Status](https://www.transifex.com/projects/p/OCA-queue-10-0/chart/image_png)](https://www.transifex.com/projects/p/OCA-queue-10-0)
+
diff --git a/oca_dependencies.txt b/oca_dependencies.txt
new file mode 100644
index 0000000000..ac0117d109
--- /dev/null
+++ b/oca_dependencies.txt
@@ -0,0 +1,15 @@
+# List the OCA project dependencies, one per line
+# Add a repository url and branch if you need a forked version
+#
+# Examples
+# ========
+#
+# To depend on the standard version of sale-workflow, use:
+# sale-workflow
+#
+# To explicitly give the URL of a fork, and still use the version specified in
+# .travis.yml, use:
+# sale-workflow https://github.com/OCA/sale-workflow
+#
+# To provide both the URL and a branch, use:
+# sale-workflow https://github.com/OCA/sale-workflow branchname
diff --git a/queue_job/README.rst b/queue_job/README.rst
new file mode 100644
index 0000000000..241aabaed8
--- /dev/null
+++ b/queue_job/README.rst
@@ -0,0 +1,205 @@
+.. image:: https://img.shields.io/badge/licence-AGPL--3-blue.svg
+   :target: http://www.gnu.org/licenses/agpl-3.0-standalone.html
+   :alt: License: AGPL-3
+
+=========
+Job Queue
+=========
+
+This addon adds an integrated Job Queue to Odoo.
+
+It allows you to postpone method calls so they are executed asynchronously.
+
+Jobs are executed in the background by a ``Jobrunner``, in their own transaction.
+
+Example:
+
+.. code-block:: python
+
+    class MyModel(models.Model):
+        _name = 'my.model'
+
+        @api.multi
+        @job
+        def my_method(self, a, k=None):
+            _logger.info('executed with a: %s and k: %s', a, k)
+
+
+    class MyOtherModel(models.Model):
+        _name = 'my.other.model'
+
+        @api.multi
+        def button_do_stuff(self):
+            self.env['my.model'].with_delay().my_method('a', k=2)
+
+
+In the snippet of code above, when we call ``button_do_stuff``, a job capturing
+the method and arguments will be postponed. It will be executed as soon as the
+Jobrunner has a free bucket, which can be instantaneous if no other job is
+running.
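+
+``with_delay()`` also accepts the job properties listed under Features
+below; a minimal sketch, reusing the model above (values are illustrative):
+
+.. code-block:: python
+
+    self.env['my.model'].with_delay(
+        priority=5,               # lower number means higher priority
+        eta=60,                   # don't run before 60 seconds from now
+        max_retries=3,            # give up after 3 failed attempts
+        description='Say hello',  # shown in the job views
+    ).my_method('a', k=2)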
+
+
+Features:
+
+* Views for jobs; jobs are stored in PostgreSQL
+* Jobrunner: executes the jobs, highly efficient thanks to PostgreSQL's NOTIFY
+* Channels: give a capacity to the root channel and its sub-channels and
+  segregate jobs in them. This allows, for instance, restricting heavy jobs
+  to run one at a time while lighter ones run 4 at a time.
+* Retries: ability to retry jobs by raising a specific type of exception
+* Retry Pattern: e.g. retry the first 3 attempts after 10 seconds, the next 5
+  attempts after 1 minute, ...
+* Job properties: priorities, estimated time of arrival (ETA), custom
+  description, number of retries
+* Related Actions: link an action to the job view, such as opening the record
+  concerned by the job
+
+
+Installation
+============
+
+Be sure to have the ``requests`` library installed.
+
+Configuration
+=============
+
+* Using environment variables and command line:
+
+  * Adjust environment variables (optional):
+
+    - ``ODOO_QUEUE_JOB_CHANNELS=root:4``
+
+    - or any other channels configuration. The default is ``root:1``
+
+    - if ``xmlrpc_port`` is not set: ``ODOO_QUEUE_JOB_PORT=8069``
+
+  * Start Odoo with ``--load=web,web_kanban,queue_job``
+    and ``--workers`` greater than 1. [1]_
+
+
+* Using the Odoo configuration file:
+
+.. code-block:: ini
+
+    [options]
+    (...)
+    workers = 4
+    server_wide_modules = web,web_kanban,queue_job
+
+    (...)
+    [queue_job]
+    channels = root:4
+
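+* The channels configuration can also give a capacity per sub-channel. A
+  sketch, assuming a sub-channel named ``root.heavy`` (the name is only an
+  example):
+
+  .. code-block:: ini
+
+      [queue_job]
+      channels = root:4,root.heavy:1
+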
+* Confirm the runner is starting correctly by checking the odoo log file:
+
+.. code-block:: none
+
+    ...INFO...queue_job.jobrunner.runner: starting
+    ...INFO...queue_job.jobrunner.runner: initializing database connections
+    ...INFO...queue_job.jobrunner.runner: queue job runner ready for db <dbname>
+    ...INFO...queue_job.jobrunner.runner: database connections ready
+
+* Create jobs (e.g. using ``base_import_async``) and observe that they
+  start immediately and in parallel.
+
+* Tip: to enable debug logging for the queue job, use
+  ``--log-handler=odoo.addons.queue_job:DEBUG``
+
+.. [1] It works with the threaded Odoo server too, although this way
+   of running Odoo is obviously not for production purposes.
+
+Usage
+=====
+
+To use this module, you need to:
+
+#. Go to the ``Job Queue`` menu
+
+.. image:: https://odoo-community.org/website/image/ir.attachment/5784_f2813bd/datas
+   :alt: Try me on Runbot
+   :target: https://runbot.odoo-community.org/runbot/230/10.0
+
+Known issues / Roadmap
+======================
+
+* After creating a new database or installing ``queue_job`` on an
+  existing database, Odoo must be restarted for the runner to detect it.
+
+* When Odoo shuts down normally, it waits for running jobs to finish.
+  However, when the Odoo server crashes or is otherwise force-stopped,
+  running jobs are interrupted while the runner has no chance to know
+  they have been aborted. In such situations, jobs may remain in
+  ``started`` or ``enqueued`` state after the Odoo server is halted.
+  Since the runner has no way to know if they are actually running or
+  not, and does not know for sure if it is safe to restart the jobs,
+  it does not attempt to restart them automatically. Such stale jobs
+  therefore fill the running queue and prevent other jobs from starting.
+  You must therefore requeue them manually, either from the Jobs view,
+  or by running the following SQL statement *before starting Odoo*:
+
+.. code-block:: sql
+
+    update queue_job set state='pending' where state in ('started', 'enqueued')
+
+Bug Tracker
+===========
+
+Bugs are tracked on `GitHub Issues
+<https://github.com/OCA/queue/issues>`_. In case of trouble, please
+check there if your issue has already been reported. If you spotted it first,
+help us smash it by providing detailed and welcome feedback.
+
+Credits
+=======
+
+Images
+------
+
+* Odoo Community Association: `Icon <https://github.com/OCA/maintainer-tools/blob/master/template/module/static/description/icon.svg>`_.
+
+Contributors
+------------
+
+* Guewen Baconnier
+* Stéphane Bidoul
+* Matthieu Dietrich
+* Jos De Graeve
+* David Lefever
+* Laurent Mignon
+* Laetitia Gangloff
+
+Maintainer
+----------
+
+.. image:: https://odoo-community.org/logo.png
+   :alt: Odoo Community Association
+   :target: https://odoo-community.org
+
+This module is maintained by the OCA.
+
+OCA, or the Odoo Community Association, is a nonprofit organization whose
+mission is to support the collaborative development of Odoo features and
+promote its widespread use.
+
+To contribute to this module, please visit https://odoo-community.org.
diff --git a/queue_job/__init__.py b/queue_job/__init__.py
new file mode 100644
index 0000000000..a2c924c4aa
--- /dev/null
+++ b/queue_job/__init__.py
@@ -0,0 +1,5 @@
+# -*- coding: utf-8 -*-
+from . import controllers
+from . import fields
+from . import models
+from . import jobrunner
diff --git a/queue_job/__manifest__.py b/queue_job/__manifest__.py
new file mode 100644
index 0000000000..bcf0e0a6a1
--- /dev/null
+++ b/queue_job/__manifest__.py
@@ -0,0 +1,22 @@
+# -*- coding: utf-8 -*-
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
+
+
+{'name': 'Job Queue',
+ 'version': '10.0.1.0.0',
+ 'author': 'Camptocamp,ACSONE SA/NV,Odoo Community Association (OCA)',
+ 'website': 'https://github.com/OCA/queue/queue_job',
+ 'license': 'AGPL-3',
+ 'category': 'Generic Modules',
+ 'depends': ['mail',
+             ],
+ 'external_dependencies': {'python': ['requests',
+                                      ],
+                           },
+ 'data': ['security/security.xml',
+          'security/ir.model.access.csv',
+          'views/queue_job_views.xml',
+          'data/queue_data.xml',
+          ],
+ 'installable': True,
+ }
diff --git a/queue_job/controllers/__init__.py b/queue_job/controllers/__init__.py
new file mode 100644
index 0000000000..12a7e529b6
--- /dev/null
+++ b/queue_job/controllers/__init__.py
@@ -0,0 +1 @@
+from . import main
diff --git a/queue_job/controllers/main.py b/queue_job/controllers/main.py
new file mode 100644
index 0000000000..bfaf3314eb
--- /dev/null
+++ b/queue_job/controllers/main.py
@@ -0,0 +1,126 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2015-2016 ACSONE SA/NV (<http://acsone.eu>)
+# Copyright 2013-2016 Camptocamp SA
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
+
+import logging
+import traceback
+from cStringIO import StringIO
+
+from psycopg2 import OperationalError
+
+import odoo
+from odoo import _, http, tools
+from odoo.service.model import PG_CONCURRENCY_ERRORS_TO_RETRY
+
+from ..job import Job, ENQUEUED
+from ..exception import (NoSuchJobError,
+                         NotReadableJobError,
+                         RetryableJobError,
+                         FailedJobError,
+                         NothingToDoJob)
+
+_logger = logging.getLogger(__name__)
+
+PG_RETRY = 5  # seconds
+
+
+class RunJobController(http.Controller):
+
+    def _load_job(self, env, job_uuid):
+        """ Reload a job from the backend """
+        try:
+            job = Job.load(env, job_uuid)
+        except NoSuchJobError:
+            # just skip it
+            job = None
+        except NotReadableJobError:
+            _logger.exception('Could not read job: %s', job_uuid)
+            raise
+        return job
+
+    def _try_perform_job(self, env, job):
+        """Try to perform the job."""
+
+        # if the job has been manually set to DONE or PENDING,
+        # or if something tries to run a job that is not enqueued
+        # before its execution, stop
+        if job.state != ENQUEUED:
+            _logger.warning('job %s is in state %s '
+                            'instead of enqueued in /runjob',
+                            job.uuid, job.state)
+            return
+
+        # TODO: set_started should be done atomically with
+        #       update queue_job set state='started'
+        #       where state='enqueued' and id=
+        job.set_started()
+        job.store()
+        http.request.env.cr.commit()
+
+        _logger.debug('%s started', job)
+        job.perform()
+        job.set_done()
+        job.store()
+        http.request.env.cr.commit()
+        _logger.debug('%s done', job)
+
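+    # The jobrunner calls this endpoint over HTTP for each job to execute;
+    # ``db`` selects the database and ``job_uuid`` identifies the job.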
+    @http.route('/queue_job/runjob', type='http', auth='none')
+    def runjob(self, db, job_uuid, **kw):
+        http.request.session.db = db
+        env = http.request.env(user=odoo.SUPERUSER_ID)
+
+        def retry_postpone(job, message, seconds=None):
+            job.postpone(result=message, seconds=seconds)
+            job.set_pending(reset_retry=False)
+            job.store()
+            env.cr.commit()
+
+        job = self._load_job(env, job_uuid)
+        if job is None:
+            return ""
+        env.cr.commit()
+
+        try:
+            try:
+                self._try_perform_job(env, job)
+            except OperationalError as err:
+                # Automatically retry the typical transaction serialization
+                # errors
+                if err.pgcode not in PG_CONCURRENCY_ERRORS_TO_RETRY:
+                    raise
+
+                retry_postpone(job, tools.ustr(err.pgerror, errors='replace'),
+                               seconds=PG_RETRY)
+                _logger.debug('%s OperationalError, postponed', job)
+
+        except NothingToDoJob as err:
+            if unicode(err):
+                msg = unicode(err)
+            else:
+                msg = _('Job interrupted and set to Done: nothing to do.')
+            job.set_done(msg)
+            job.store()
+            env.cr.commit()
+
+        except RetryableJobError as err:
+            # delay the job later, requeue
+            retry_postpone(job, unicode(err), seconds=err.seconds)
+            _logger.debug('%s postponed', job)
+
+        except (FailedJobError, Exception):
+            buff = StringIO()
+            traceback.print_exc(file=buff)
+            _logger.error(buff.getvalue())
+            job.env.clear()
+            with odoo.api.Environment.manage():
+                with odoo.registry(job.env.cr.dbname).cursor() as new_cr:
+                    job.env = job.env(cr=new_cr)
+                    job.set_failed(exc_info=buff.getvalue())
+                    job.store()
+                    new_cr.commit()
+            raise
+
+        return ""
diff --git a/queue_job/data/queue_data.xml b/queue_job/data/queue_data.xml
new file mode 100644
index 0000000000..58d69c8f21
--- /dev/null
+++ b/queue_job/data/queue_data.xml
@@ -0,0 +1,30 @@
+<?xml version="1.0" encoding="utf-8"?>
+<odoo>
+    <data>
+
+        <record id="mt_job_failed" model="mail.message.subtype">
+            <field name="name">Job failed</field>
+            <field name="res_model">queue.job</field>
+            <field name="default" eval="False"/>
+            <field name="hidden" eval="True"/>
+        </record>
+
+        <record forcecreate="True" id="ir_cron_autovacuum_queue_jobs"
+                model="ir.cron">
+            <field name="name">AutoVacuum Job Queue</field>
+            <field name="model_id" ref="model_queue_job"/>
+            <field name="user_id" ref="base.user_root"/>
+            <field name="interval_number">1</field>
+            <field name="interval_type">days</field>
+            <field name="numbercall">-1</field>
+            <field name="doall" eval="False"/>
+            <field name="model">queue.job</field>
+            <field name="function">autovacuum</field>
+        </record>
+
+        <record id="channel_root" model="queue.job.channel">
+            <field name="name">root</field>
+        </record>
+
+    </data>
+</odoo>
diff --git a/queue_job/exception.py b/queue_job/exception.py
new file mode 100644
index 0000000000..87b86a59ad
--- /dev/null
+++ b/queue_job/exception.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2012-2016 Camptocamp
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
+
+
+class BaseQueueJobError(Exception):
+    """ Base queue job error """
+
+
+class JobError(BaseQueueJobError):
+    """ A job had an error """
+
+
+class NoSuchJobError(JobError):
+    """ The job does not exist. """
+
+
+class NotReadableJobError(JobError):
+    """ The job cannot be read from the storage. """
+
+
+class FailedJobError(JobError):
+    """ A job had an error that needs to be resolved. """
+
+
+class RetryableJobError(JobError):
+    """ A job had an error but can be retried.
+
+    The job will be retried after the given number of seconds. If seconds is
+    empty, it will be retried according to the ``retry_pattern`` of the job or
+    by :const:`odoo.addons.queue_job.job.RETRY_INTERVAL` if nothing is defined.
+
+    If ``ignore_retry`` is True, the retry counter will not be increased.
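+
+    For instance::
+
+        raise RetryableJobError('could not acquire lock', seconds=5)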
+ """
+
+ def __init__(self, msg, seconds=None, ignore_retry=False):
+ super(RetryableJobError, self).__init__(msg)
+ self.seconds = seconds
+ self.ignore_retry = ignore_retry
+
+
+# TODO: remove support of NothingToDo: too dangerous
+class NothingToDoJob(JobError):
+ """ The Job has nothing to do. """
+
+
+class ChannelNotFound(BaseQueueJobError):
+ """ A channel could not be found """
diff --git a/queue_job/fields.py b/queue_job/fields.py
new file mode 100644
index 0000000000..3c9b66226a
--- /dev/null
+++ b/queue_job/fields.py
@@ -0,0 +1,57 @@
+# -*- coding: utf-8 -*-
+# Copyright 2016 Camptocamp
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
+
+import json
+
+from odoo import fields, models
+
+
+class JobSerialized(fields.Field):
+    """ Provide the storage for fields serialized to JSON """
+    type = 'job_serialized'
+    column_type = ('text', 'text')
+
+    def convert_to_column(self, value, record):
+        return json.dumps(value, cls=JobEncoder)
+
+    def convert_to_cache(self, value, record, validate=True):
+        # cache format: dict
+        value = value or {}
+        if isinstance(value, dict):
+            return value
+        else:
+            return json.loads(value, cls=JobDecoder, env=record.env)
+
+
+class JobEncoder(json.JSONEncoder):
+    """ Encode Odoo recordsets so that we can later recompose them """
+
+    def default(self, obj):
+        if isinstance(obj, models.BaseModel):
+            return {'_type': 'odoo_recordset',
+                    'model': obj._name,
+                    'ids': obj.ids}
+        return json.JSONEncoder.default(self, obj)
+
+
+class JobDecoder(json.JSONDecoder):
+    """ Decode json, recomposing recordsets """
+
+    def __init__(self, *args, **kwargs):
+        env = kwargs.pop('env')
+        super(JobDecoder, self).__init__(
+            object_hook=self.object_hook, *args, **kwargs
+        )
+        assert env
+        self.env = env
+
+    def object_hook(self, obj):
+        if '_type' not in obj:
+            return obj
+        type_ = obj['_type']
+        if type_ == 'odoo_recordset':
+            return self.env[obj['model']].browse(obj['ids'])
+        return obj
diff --git a/queue_job/job.py b/queue_job/job.py
new file mode 100644
index 0000000000..7b678a6124
--- /dev/null
+++ b/queue_job/job.py
@@ -0,0 +1,703 @@
+# -*- coding: utf-8 -*-
+# Copyright 2013-2016 Camptocamp
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
+
+import inspect
+import functools
+import logging
+import uuid
+import sys
+from datetime import datetime, timedelta
+
+import odoo
+
+from .exception import (NoSuchJobError,
+                        FailedJobError,
+                        RetryableJobError)
+
+PENDING = 'pending'
+ENQUEUED = 'enqueued'
+DONE = 'done'
+STARTED = 'started'
+FAILED = 'failed'
+
+STATES = [(PENDING, 'Pending'),
+          (ENQUEUED, 'Enqueued'),
+          (STARTED, 'Started'),
+          (DONE, 'Done'),
+          (FAILED, 'Failed')]
+
+DEFAULT_PRIORITY = 10 # used by the PriorityQueue to sort the jobs
+DEFAULT_MAX_RETRIES = 5
+RETRY_INTERVAL = 10 * 60 # seconds
+
+_logger = logging.getLogger(__name__)
+
+
+class DelayableRecordset(object):
+    """ Allow delaying a method of a recordset
+
+    Usage::
+
+        delayable = DelayableRecordset(recordset, priority=20)
+        delayable.method(args, kwargs)
+
+    ``method`` must be a method of the recordset's Model, decorated with
+    :func:`~odoo.addons.queue_job.job.job`.
+
+    The method call will be processed asynchronously in the job queue, with
+    the passed arguments.
+
+
+    """
+
+    def __init__(self, recordset, priority=None, eta=None,
+                 max_retries=None, description=None):
+        self.recordset = recordset
+        self.priority = priority
+        self.eta = eta
+        self.max_retries = max_retries
+        self.description = description
+
+    def __getattr__(self, name):
+        if name in self.recordset:
+            raise AttributeError(
+                'only methods can be delayed (%s called on %s)' %
+                (name, self.recordset)
+            )
+        recordset_method = getattr(self.recordset, name)
+        if not getattr(recordset_method, 'delayable', None):
+            raise AttributeError(
+                'method %s on %s is not allowed to be delayed, '
+                'it should be decorated with odoo.addons.queue_job.job.job' %
+                (name, self.recordset)
+            )
+
+        def delay(*args, **kwargs):
+            return Job.enqueue(recordset_method,
+                               args=args,
+                               kwargs=kwargs,
+                               priority=self.priority,
+                               max_retries=self.max_retries,
+                               eta=self.eta,
+                               description=self.description)
+        return delay
+
+    def __str__(self):
+        return "DelayableRecordset(%s%s)" % (
+            self.recordset._name,
+            getattr(self.recordset, '_ids', "")
+        )
+
+    def __unicode__(self):
+        return unicode(str(self))
+
+    __repr__ = __str__
+
+
+class Job(object):
+ """ A Job is a task to execute.
+
+ .. attribute:: uuid
+
+ Id (UUID) of the job.
+
+ .. attribute:: state
+
+ State of the job, can pending, enqueued, started, done or failed.
+ The start state is pending and the final state is done.
+
+ .. attribute:: retry
+
+ The current try, starts at 0 and each time the job is executed,
+ it increases by 1.
+
+ .. attribute:: max_retries
+
+ The maximum number of retries allowed before the job is
+ considered as failed.
+
+ .. attribute:: args
+
+ Arguments passed to the function when executed.
+
+ .. attribute:: kwargs
+
+ Keyword arguments passed to the function when executed.
+
+ .. attribute:: description
+
+ Human description of the job.
+
+ .. attribute:: func
+
+ The python function itself.
+
+ .. attribute:: model_name
+
+ Odoo model on which the job will run.
+
+ .. attribute:: priority
+
+ Priority of the job, 0 being the highest priority.
+
+ .. attribute:: date_created
+
+ Date and time when the job was created.
+
+ .. attribute:: date_enqueued
+
+ Date and time when the job was enqueued.
+
+ .. attribute:: date_started
+
+ Date and time when the job was started.
+
+ .. attribute:: date_done
+
+ Date and time when the job was done.
+
+ .. attribute:: result
+
+ A description of the result (for humans).
+
+ .. attribute:: exc_info
+
+ Exception information (traceback) when the job failed.
+
+ .. attribute:: user_id
+
+ Id of the Odoo user who created the job.
+
+ .. attribute:: eta
+
+ Estimated Time of Arrival of the job. It will not be executed
+ before this date/time.
+
+ .. attribute:: recordset
+
+ Recordset on which the delayed Model method is called.
+
+ """
+
+ @classmethod
+ def load(cls, env, job_uuid):
+ """ Read a job from the Database"""
+ stored = cls.db_record_from_uuid(env, job_uuid)
+ if not stored:
+ raise NoSuchJobError(
+ 'Job %s no longer exists in the storage.' % job_uuid)
+
+ args = stored.args
+ kwargs = stored.kwargs
+ method_name = stored.method_name
+
+ model = env[stored.model_name]
+ recordset = model.browse(stored.record_ids)
+ method = getattr(recordset, method_name)
+
+ dt_from_string = odoo.fields.Datetime.from_string
+ eta = None
+ if stored.eta:
+ eta = dt_from_string(stored.eta)
+
+ job_ = cls(method, args=args, kwargs=kwargs,
+ priority=stored.priority, eta=eta, job_uuid=stored.uuid,
+ description=stored.name)
+
+ if stored.date_created:
+ job_.date_created = dt_from_string(stored.date_created)
+
+ if stored.date_enqueued:
+ job_.date_enqueued = dt_from_string(stored.date_enqueued)
+
+ if stored.date_started:
+ job_.date_started = dt_from_string(stored.date_started)
+
+ if stored.date_done:
+ job_.date_done = dt_from_string(stored.date_done)
+
+ job_.state = stored.state
+ job_.result = stored.result if stored.result else None
+ job_.exc_info = stored.exc_info if stored.exc_info else None
+ job_.user_id = stored.user_id.id if stored.user_id else None
+ job_.model_name = stored.model_name if stored.model_name else None
+ job_.retry = stored.retry
+ job_.max_retries = stored.max_retries
+ if stored.company_id:
+ job_.company_id = stored.company_id.id
+ return job_
+
+ @classmethod
+ def enqueue(cls, func, args=None, kwargs=None,
+ priority=None, eta=None, max_retries=None, description=None):
+ """Create a Job and enqueue it in the queue. Return the job uuid.
+
+ This expects the arguments specific to the job to be already extracted
+ from the ones to pass to the job function.
+
+ """
+ new_job = cls(func=func, args=args,
+ kwargs=kwargs, priority=priority, eta=eta,
+ max_retries=max_retries, description=description)
+ new_job.store()
+ return new_job
+
+ @staticmethod
+ def db_record_from_uuid(env, job_uuid):
+ model = env['queue.job'].sudo()
+ record = model.search([('uuid', '=', job_uuid)], limit=1)
+ return record.with_env(env)
+
+ def __init__(self, func,
+ args=None, kwargs=None, priority=None,
+ eta=None, job_uuid=None, max_retries=None,
+ description=None):
+ """ Create a Job
+
+ :param func: function to execute
+ :type func: function
+ :param args: arguments for func
+ :type args: tuple
+ :param kwargs: keyword arguments for func
+ :type kwargs: dict
+ :param priority: priority of the job;
+ the smaller the number, the higher the priority
+ :type priority: int
+ :param eta: the job can be executed only after this datetime
+ (or now + timedelta)
+ :type eta: datetime or timedelta
+ :param job_uuid: UUID of the job
+ :param max_retries: maximum number of retries before giving up and
+ setting the job state to 'failed'. A value of 0 means infinite retries.
+ :param description: human description of the job. If None, the
+ description is computed from the function doc or name
+
+ The Odoo Environment is taken from the recordset bound to ``func``,
+ so there is no ``env`` argument.
+ """
+ if args is None:
+ args = ()
+ if isinstance(args, list):
+ args = tuple(args)
+ assert isinstance(args, tuple), "%s: args are not a tuple" % args
+ if kwargs is None:
+ kwargs = {}
+
+ assert isinstance(kwargs, dict), "%s: kwargs are not a dict" % kwargs
+
+ if (not inspect.ismethod(func) or
+ not isinstance(func.im_class, odoo.models.MetaModel)):
+ raise TypeError("Job accepts only methods of Models")
+
+ recordset = func.im_self
+ env = recordset.env
+ self.model_name = func.im_class._name
+ self.method_name = func.im_func.func_name
+ self.recordset = recordset
+
+ self.env = env
+ self.job_model = self.env['queue.job']
+ self.job_model_name = 'queue.job'
+
+ self.state = PENDING
+
+ self.retry = 0
+ if max_retries is None:
+ self.max_retries = DEFAULT_MAX_RETRIES
+ else:
+ self.max_retries = max_retries
+
+ self._uuid = job_uuid
+
+ self.args = args
+ self.kwargs = kwargs
+
+ self.priority = priority
+ if self.priority is None:
+ self.priority = DEFAULT_PRIORITY
+
+ self.date_created = datetime.now()
+ self._description = description
+
+ self.date_enqueued = None
+ self.date_started = None
+ self.date_done = None
+
+ self.result = None
+ self.exc_info = None
+
+ self.user_id = env.uid
+ if 'company_id' in env.context:
+ company_id = env.context['company_id']
+ else:
+ company_model = env['res.company']
+ company_model = company_model.sudo(self.user_id)
+ company_id = company_model._company_default_get(
+ object='queue.job',
+ field='company_id'
+ ).id
+ self.company_id = company_id
+ self._eta = None
+ self.eta = eta
+
+ def perform(self):
+ """ Execute the job.
+
+ The job is executed with the user who initiated it.
+ """
+ self.retry += 1
+ try:
+ self.result = self.func(*tuple(self.args), **self.kwargs)
+ except RetryableJobError as err:
+ if err.ignore_retry:
+ self.retry -= 1
+ raise
+ elif not self.max_retries: # infinite retries
+ raise
+ elif self.retry >= self.max_retries:
+ type_, value, traceback = sys.exc_info()
+ # change the exception type but keep the original
+ # traceback and message:
+ # http://blog.ianbicking.org/2007/09/12/re-raising-exceptions/
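+ # (Python 2 three-expression raise: re-raise the new
+ # FailedJobError with the original traceback)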
+ new_exc = FailedJobError("Max. retries (%d) reached: %s" %
+ (self.max_retries, value or type_)
+ )
+ raise new_exc.__class__, new_exc, traceback
+ raise
+ return self.result
+
+ def store(self):
+ """ Store the Job """
+ vals = {'state': self.state,
+ 'priority': self.priority,
+ 'retry': self.retry,
+ 'max_retries': self.max_retries,
+ 'exc_info': self.exc_info,
+ 'user_id': self.user_id or self.env.uid,
+ 'company_id': self.company_id,
+ 'result': unicode(self.result) if self.result else False,
+ 'date_enqueued': False,
+ 'date_started': False,
+ 'date_done': False,
+ 'eta': False,
+ }
+
+ dt_to_string = odoo.fields.Datetime.to_string
+ if self.date_enqueued:
+ vals['date_enqueued'] = dt_to_string(self.date_enqueued)
+ if self.date_started:
+ vals['date_started'] = dt_to_string(self.date_started)
+ if self.date_done:
+ vals['date_done'] = dt_to_string(self.date_done)
+ if self.eta:
+ vals['eta'] = dt_to_string(self.eta)
+
+ db_record = self.db_record()
+ if db_record:
+ db_record.write(vals)
+ else:
+ date_created = dt_to_string(self.date_created)
+ # The following values must never be modified after the
+ # creation of the job
+ vals.update({'uuid': self.uuid,
+ 'name': self.description,
+ 'date_created': date_created,
+ 'model_name': self.model_name,
+ 'method_name': self.method_name,
+ 'record_ids': self.recordset.ids,
+ 'args': self.args,
+ 'kwargs': self.kwargs,
+ })
+
+ self.env[self.job_model_name].sudo().create(vals)
+
+ def db_record(self):
+ return self.db_record_from_uuid(self.env, self.uuid)
+
+ @property
+ def func(self):
+ recordset = self.recordset.with_context(job_uuid=self.uuid)
+ recordset = recordset.sudo(self.user_id)
+ return getattr(recordset, self.method_name)
+
+ @property
+ def description(self):
+ if self._description:
+ return self._description
+ elif self.func.__doc__:
+ return self.func.__doc__.splitlines()[0].strip()
+ else:
+ return '%s.%s' % (self.model_name, self.func.__name__)
+
+ @property
+ def uuid(self):
+ """Job ID, this is an UUID """
+ if self._uuid is None:
+ self._uuid = unicode(uuid.uuid4())
+ return self._uuid
+
+ @property
+ def eta(self):
+ return self._eta
+
+ @eta.setter
+ def eta(self, value):
+ if not value:
+ self._eta = None
+ elif isinstance(value, timedelta):
+ self._eta = datetime.now() + value
+ elif isinstance(value, int):
+ self._eta = datetime.now() + timedelta(seconds=value)
+ else:
+ self._eta = value
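+
+ # For example (illustrative values): ``job.eta = 300`` postpones the job
+ # by 5 minutes, ``job.eta = timedelta(hours=1)`` by one hour, and a
+ # datetime is taken as-is.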
+
+ def set_pending(self, result=None, reset_retry=True):
+ self.state = PENDING
+ self.date_enqueued = None
+ self.date_started = None
+ if reset_retry:
+ self.retry = 0
+ if result is not None:
+ self.result = result
+
+ def set_enqueued(self):
+ self.state = ENQUEUED
+ self.date_enqueued = datetime.now()
+ self.date_started = None
+
+ def set_started(self):
+ self.state = STARTED
+ self.date_started = datetime.now()
+
+ def set_done(self, result=None):
+ self.state = DONE
+ self.exc_info = None
+ self.date_done = datetime.now()
+ if result is not None:
+ self.result = result
+
+ def set_failed(self, exc_info=None):
+ self.state = FAILED
+ if exc_info is not None:
+ self.exc_info = exc_info
+
+ def __repr__(self):
+ return '<Job %s, priority:%d>' % (self.uuid, self.priority)
+
+ def _get_retry_seconds(self, seconds=None):
+ retry_pattern = self.func.retry_pattern
+ if not seconds and retry_pattern:
+ # ordered from lower to higher count of retries
+ patt = sorted(retry_pattern.iteritems(), key=lambda t: t[0])
+ seconds = RETRY_INTERVAL
+ for retry_count, postpone_seconds in patt:
+ if self.retry >= retry_count:
+ seconds = postpone_seconds
+ else:
+ break
+ elif not seconds:
+ seconds = RETRY_INTERVAL
+ return seconds
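+
+ # Example (illustrative): with retry_pattern={1: 60, 5: 600} and
+ # self.retry == 3, _get_retry_seconds() returns 60; from the 5th
+ # retry on, it returns 600.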
+
+ def postpone(self, result=None, seconds=None):
+ """ Write an estimated time arrival to n seconds
+ later than now. Used when an retryable exception
+ want to retry a job later. """
+ eta_seconds = self._get_retry_seconds(seconds)
+ self.eta = timedelta(seconds=eta_seconds)
+ self.exc_info = None
+ if result is not None:
+ self.result = result
+
+ def related_action(self):
+ if not hasattr(self.func, 'related_action'):
+ return None
+ if not self.func.related_action:
+ return None
+ if not isinstance(self.func.related_action, basestring):
+ raise ValueError('related_action must be the name of the '
+ 'method on queue.job as string')
+ action = getattr(self.db_record(), self.func.related_action)
+ return action(**self.func.kwargs)
+
+
+def _is_model_method(func):
+ return (inspect.ismethod(func) and
+ isinstance(func.im_class, odoo.models.MetaModel))
+
+
+def job(func=None, default_channel='root', retry_pattern=None):
+ """ Decorator for jobs.
+
+ Optional arguments:
+
+ :param default_channel: the channel wherein the job will be assigned. This
+ channel is set at the installation of the module
+ and can be manually changed later using the views.
+ :param retry_pattern: The retry pattern to use for postponing a job.
+ If a job is postponed and there is no eta
+ specified, the eta will be determined from the
+ dict in retry_pattern. When no retry pattern
+ is provided, jobs will be retried after
+ :const:`RETRY_INTERVAL` seconds.
+ :type retry_pattern: dict(retry_count,retry_eta_seconds)
+
+ Indicates that a method of a Model can be delayed in the Job Queue.
+
+ When a method has the ``@job`` decorator, its calls can then be delayed
+ with::
+
+ recordset.with_delay(priority=10).the_method(args, **kwargs)
+
+ Where ``the_method`` is the method decorated with ``@job``. Its arguments
+ and keyword arguments will be kept in the Job Queue for its asynchronous
+ execution.
+
+ ``default_channel`` indicates in which channel the job must be executed
+
+ ``retry_pattern`` is a dict where keys are the count of retries and the
+ values are the delay to postpone a job.
+
+ Example:
+
+ .. code-block:: python
+
+ class ProductProduct(models.Model):
+ _inherit = 'product.product'
+
+ @api.multi
+ @job
+ def export_one_thing(self, one_thing):
+ # work
+ # export one_thing
+
+ # [...]
+
+ env['a.model'].export_one_thing(the_thing_to_export)
+ # => normal and synchronous function call
+
+ env['a.model'].with_delay().export_one_thing(the_thing_to_export)
+ # => the job will be executed as soon as possible
+
+ delayable = env['a.model'].with_delay(priority=30, eta=60*60*5)
+ delayable.export_one_thing(the_thing_to_export)
+ # => the job will be executed with a low priority and not before a
+ # delay of 5 hours from now
+
+ @job(default_channel='root.subchannel')
+ def export_one_thing(one_thing):
+ # work
+ # export one_thing
+
+ @job(retry_pattern={1: 10 * 60,
+ 5: 20 * 60,
+ 10: 30 * 60,
+ 15: 12 * 60 * 60})
+ def retryable_example():
+ # 5 first retries postponed 10 minutes later
+ # retries 5 to 10 postponed 20 minutes later
+ # retries 10 to 15 postponed 30 minutes later
+ # all subsequent retries postponed 12 hours later
+ raise RetryableJobError('Must be retried later')
+
+ env['a.model'].with_delay().retryable_example()
+
+
+ See also :py:func:`related_action`: a related action can be attached
+ to a job.
+
+ """
+ if func is None:
+ return functools.partial(job, default_channel=default_channel,
+ retry_pattern=retry_pattern)
+
+ def delay_from_model(*args, **kwargs):
+ raise AttributeError(
+ "method.delay() can no longer be used, the general form is "
+ "env['res.users'].with_delay().method()"
+ )
+
+ assert default_channel == 'root' or default_channel.startswith('root.'), (
+ "The channel path must start by 'root'")
+ assert retry_pattern is None or isinstance(retry_pattern, dict), (
+ "retry_pattern must be a dict"
+ )
+
+ delay_func = delay_from_model
+
+ func.delayable = True
+ func.delay = delay_func
+ func.retry_pattern = retry_pattern
+ func.default_channel = default_channel
+ return func
+
+
+def related_action(action=None, **kwargs):
+ """ Attach a *Related Action* to a job.
+
+ A *Related Action* will appear as a button on the Odoo view.
+ The button will execute the action, usually it will open the
+ form view of the record related to the job.
+
+ The ``action`` must be a method on the `queue.job` model.
+
+ Example usage:
+
+ .. code-block:: python
+
+ class QueueJob(models.Model):
+ _inherit = 'queue.job'
+
+ @api.multi
+ def related_action_partner(self):
+ self.ensure_one()
+ model = self.model_name
+ partner = self.env[model].browse(self.record_ids)
+ # possibly get the real ID if partner_id is a binding ID
+ action = {
+ 'name': _("Partner"),
+ 'type': 'ir.actions.act_window',
+ 'res_model': model,
+ 'view_type': 'form',
+ 'view_mode': 'form',
+ 'res_id': partner.id,
+ }
+ return action
+
+ class ResPartner(models.Model):
+ _inherit = 'res.partner'
+
+ @api.multi
+ @job
+ @related_action(action='related_action_partner')
+ def export_partner(self):
+ # ...
+
+ The kwargs are transmitted to the action:
+
+ .. code-block:: python
+
+ class QueueJob(models.Model):
+ _inherit = 'queue.job'
+
+ @api.multi
+ def related_action_product(self, extra_arg=1):
+ assert extra_arg == 2
+ model = self.model_name
+ ...
+
+ class ProductProduct(models.Model):
+ _inherit = 'product.product'
+
+ @api.multi
+ @job
+ @related_action(action='related_action_product', extra_arg=2)
+ def export_product(self):
+ # ...
+
+ """
+ def decorate(func):
+ func.related_action = action
+ func.kwargs = kwargs
+ return func
+ return decorate
diff --git a/queue_job/jobrunner/__init__.py b/queue_job/jobrunner/__init__.py
new file mode 100644
index 0000000000..c48065c1d5
--- /dev/null
+++ b/queue_job/jobrunner/__init__.py
@@ -0,0 +1,96 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2015-2016 ACSONE SA/NV (<http://acsone.eu>)
+# Copyright 2016 Camptocamp SA
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
+
+import logging
+import os
+from threading import Thread
+import time
+
+from odoo.service import server
+from odoo.tools import config
+
+from .runner import QueueJobRunner
+
+_logger = logging.getLogger(__name__)
+
+START_DELAY = 5
+
+
+# Here we monkey patch the Odoo server to start the job runner thread
+# in the main server process (and not in forked workers). This is
+# very easy to deploy as we don't need another startup script.
+
+
+class QueueJobRunnerThread(Thread):
+
+ def __init__(self):
+ Thread.__init__(self)
+ self.daemon = True
+ port = os.environ.get('ODOO_QUEUE_JOB_PORT') or config['xmlrpc_port']
+ self.runner = QueueJobRunner(port or 8069)
+
+ def run(self):
+ # sleep a bit to let the workers start at ease
+ time.sleep(START_DELAY)
+ self.runner.run()
+
+ def stop(self):
+ self.runner.stop()
+
+
+runner_thread = None
+
+orig_prefork_start = server.PreforkServer.start
+orig_prefork_stop = server.PreforkServer.stop
+orig_threaded_start = server.ThreadedServer.start
+orig_threaded_stop = server.ThreadedServer.stop
+
+
+def prefork_start(server, *args, **kwargs):
+ global runner_thread
+ res = orig_prefork_start(server, *args, **kwargs)
+ if not config['stop_after_init']:
+ _logger.info("starting jobrunner thread (in prefork server)")
+ runner_thread = QueueJobRunnerThread()
+ runner_thread.start()
+ return res
+
+
+def prefork_stop(server, graceful=True):
+ global runner_thread
+ if runner_thread:
+ runner_thread.stop()
+ res = orig_prefork_stop(server, graceful)
+ if runner_thread:
+ runner_thread.join()
+ runner_thread = None
+ return res
+
+
+def threaded_start(server, *args, **kwargs):
+ global runner_thread
+ res = orig_threaded_start(server, *args, **kwargs)
+ if not config['stop_after_init']:
+ _logger.info("starting jobrunner thread (in threaded server)")
+ runner_thread = QueueJobRunnerThread()
+ runner_thread.start()
+ return res
+
+
+def threaded_stop(server):
+ global runner_thread
+ if runner_thread:
+ runner_thread.stop()
+ res = orig_threaded_stop(server)
+ if runner_thread:
+ runner_thread.join()
+ runner_thread = None
+ return res
+
+
+server.PreforkServer.start = prefork_start
+server.PreforkServer.stop = prefork_stop
+server.ThreadedServer.start = threaded_start
+server.ThreadedServer.stop = threaded_stop
diff --git a/queue_job/jobrunner/channels.py b/queue_job/jobrunner/channels.py
new file mode 100644
index 0000000000..85c73a5f32
--- /dev/null
+++ b/queue_job/jobrunner/channels.py
@@ -0,0 +1,1055 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2015-2016 ACSONE SA/NV (<http://acsone.eu>)
+# Copyright 2015-2016 Camptocamp SA
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
+
+from heapq import heappush, heappop
+import logging
+from weakref import WeakValueDictionary
+
+from ..exception import ChannelNotFound
+from ..job import PENDING, ENQUEUED, STARTED, FAILED, DONE
+NOT_DONE = (PENDING, ENQUEUED, STARTED, FAILED)
+
+_logger = logging.getLogger(__name__)
+
+
+class PriorityQueue(object):
+ """A priority queue that supports removing arbitrary objects.
+
+ Adding an object already in the queue is a no-op.
+ Popping an empty queue returns None.
+
+ >>> q = PriorityQueue()
+ >>> q.add(2)
+ >>> q.add(3)
+ >>> q.add(3)
+ >>> q.add(1)
+ >>> q[0]
+ 1
+ >>> len(q)
+ 3
+ >>> q.pop()
+ 1
+ >>> q.remove(2)
+ >>> len(q)
+ 1
+ >>> q[0]
+ 3
+ >>> q.pop()
+ 3
+ >>> q.pop()
+ >>> q.add(2)
+ >>> q.remove(2)
+ >>> q.add(2)
+ >>> q.pop()
+ 2
+ """
+
+ def __init__(self):
+ self._heap = []
+ self._known = set() # all objects in the heap (including removed)
+ self._removed = set() # all objects that have been removed
+
+ def __len__(self):
+ return len(self._known) - len(self._removed)
+
+ def __getitem__(self, i):
+ if i != 0:
+ raise IndexError()
+ while True:
+ if not self._heap:
+ raise IndexError()
+ o = self._heap[0]
+ if o in self._removed:
+ o2 = heappop(self._heap)
+ assert o2 == o
+ self._removed.remove(o)
+ self._known.remove(o)
+ else:
+ return o
+
+ def __contains__(self, o):
+ return o in self._known and o not in self._removed
+
+ def add(self, o):
+ if o is None:
+ raise ValueError()
+ if o in self._removed:
+ self._removed.remove(o)
+ if o in self._known:
+ return
+ self._known.add(o)
+ heappush(self._heap, o)
+
+ def remove(self, o):
+ if o is None:
+ raise ValueError()
+ if o not in self._known:
+ return
+ if o not in self._removed:
+ self._removed.add(o)
+
+ def pop(self):
+ while True:
+ try:
+ o = heappop(self._heap)
+ except IndexError:
+ # queue is empty
+ return None
+ self._known.remove(o)
+ if o in self._removed:
+ self._removed.remove(o)
+ else:
+ return o
+
+
+class SafeSet(set):
+ """A set that does not raise KeyError when removing non-existent items.
+
+ >>> s = SafeSet()
+ >>> s.remove(1)
+ >>> len(s)
+ 0
+ >>> s.remove(1)
+ """
+ def remove(self, o):
+ try:
+ super(SafeSet, self).remove(o)
+ except KeyError:
+ pass
+
+
+class ChannelJob(object):
+ """A channel job is attached to a channel and holds the properties of a
+ job that are necessary to prioritise them.
+
+ Channel jobs are comparable according to the following rules:
+ * jobs with an eta come before all other jobs
+ * then jobs with a smaller eta come first
+ * then jobs with a smaller priority come first
+ * then jobs with a smaller creation time come first
+ * then jobs with a smaller sequence come first
+
+ Here are some examples.
+
+ j1 comes before j2 because it has a smaller date_created
+
+ >>> j1 = ChannelJob(None, None, 1,
+ ... seq=0, date_created=1, priority=9, eta=None)
+ >>> j1
+ <ChannelJob 1>
+ >>> j2 = ChannelJob(None, None, 2,
+ ... seq=0, date_created=2, priority=9, eta=None)
+ >>> j1 < j2
+ True
+
+ j3 comes first because it has lower priority,
+ despite having a creation date after j1 and j2
+
+ >>> j3 = ChannelJob(None, None, 3,
+ ... seq=0, date_created=3, priority=2, eta=None)
+ >>> j3 < j1
+ True
+
+ j4 and j5 come even before j3, because they have an eta
+
+ >>> j4 = ChannelJob(None, None, 4,
+ ... seq=0, date_created=4, priority=9, eta=9)
+ >>> j5 = ChannelJob(None, None, 5,
+ ... seq=0, date_created=5, priority=9, eta=9)
+ >>> j4 < j5 < j3
+ True
+
+ j6 has the same date_created and priority as j5 but a smaller eta
+
+ >>> j6 = ChannelJob(None, None, 6,
+ ... seq=0, date_created=5, priority=9, eta=2)
+ >>> j6 < j4 < j5
+ True
+
+ Here is the complete suite:
+
+ >>> j6 < j4 < j5 < j3 < j1 < j2
+ True
+
+ j0 has the same properties as j1 but they are not considered
+ equal as they are different instances
+
+ >>> j0 = ChannelJob(None, None, 1,
+ ... seq=0, date_created=1, priority=9, eta=None)
+ >>> j0 == j1
+ False
+ >>> j0 == j0
+ True
+
+ Comparison excluding eta:
+
+ >>> j1.cmp_no_eta(j2)
+ -1
+ """
+
+ def __init__(self, db_name, channel, uuid,
+ seq, date_created, priority, eta):
+ self.db_name = db_name
+ self.channel = channel
+ self.uuid = uuid
+ self.seq = seq
+ self.date_created = date_created
+ self.priority = priority
+ self.eta = eta
+
+ def __repr__(self):
+ return "<ChannelJob %s>" % self.uuid
+
+ def __eq__(self, other):
+ return id(self) == id(other)
+
+ def __hash__(self):
+ return id(self)
+
+ def cmp_no_eta(self, other):
+ return (cmp(self.priority, other.priority) or
+ cmp(self.date_created, other.date_created) or
+ cmp(self.seq, other.seq))
+
+ def __cmp__(self, other):
+ if self.eta and not other.eta:
+ return -1
+ elif not self.eta and other.eta:
+ return 1
+ else:
+ return (cmp(self.eta, other.eta) or
+ self.cmp_no_eta(other))
+
+
+class ChannelQueue(object):
+ """A channel queue is a priority queue for jobs.
+
+ Jobs with an eta are set aside until their eta is past due, at
+ which point they start competing normally with other jobs.
+
+ >>> q = ChannelQueue()
+ >>> j1 = ChannelJob(None, None, 1,
+ ... seq=0, date_created=1, priority=1, eta=10)
+ >>> j2 = ChannelJob(None, None, 2,
+ ... seq=0, date_created=2, priority=1, eta=None)
+ >>> j3 = ChannelJob(None, None, 3,
+ ... seq=0, date_created=3, priority=1, eta=None)
+ >>> q.add(j1)
+ >>> q.add(j2)
+ >>> q.add(j3)
+
+ Wakeup time is the eta of job 1.
+
+ >>> q.get_wakeup_time()
+ 10
+
+ We have not reached the eta of job 1, so we get job 2.
+
+ >>> q.pop(now=1)
+ <ChannelJob 2>
+
+ Wakeup time is still the eta of job 1, and we get job 1 when we are past
+ its eta.
+
+ >>> q.get_wakeup_time()
+ 10
+ >>> q.pop(now=11)
+ <ChannelJob 1>
+
+ Now there is no wakeup time anymore, because no job has an eta.
+
+ >>> q.get_wakeup_time()
+ 0
+ >>> q.pop(now=12)
+ <ChannelJob 3>
+ >>> q.get_wakeup_time()
+ 0
+ >>> q.pop(now=13)
+
+ Observe that jobs with a past eta still run after jobs with higher priority.
+
+ >>> j4 = ChannelJob(None, None, 4,
+ ... seq=0, date_created=4, priority=10, eta=20)
+ >>> j5 = ChannelJob(None, None, 5,
+ ... seq=0, date_created=5, priority=1, eta=None)
+ >>> q.add(j4)
+ >>> q.add(j5)
+ >>> q.get_wakeup_time()
+ 20
+ >>> q.pop(21)
+ <ChannelJob 5>
+ >>> q.get_wakeup_time()
+ 0
+ >>> q.pop(22)
+ <ChannelJob 4>
+
+ Test a sequential queue.
+
+ >>> sq = ChannelQueue(sequential=True)
+ >>> j6 = ChannelJob(None, None, 6,
+ ... seq=0, date_created=6, priority=1, eta=None)
+ >>> j7 = ChannelJob(None, None, 7,
+ ... seq=0, date_created=7, priority=1, eta=20)
+ >>> j8 = ChannelJob(None, None, 8,
+ ... seq=0, date_created=8, priority=1, eta=None)
+ >>> sq.add(j6)
+ >>> sq.add(j7)
+ >>> sq.add(j8)
+ >>> sq.pop(10)
+ <ChannelJob 6>
+ >>> sq.pop(15)
+ >>> sq.pop(20)
+ <ChannelJob 7>
+ >>> sq.pop(30)
+ <ChannelJob 8>
+ """
+
+ def __init__(self, sequential=False):
+ self._queue = PriorityQueue()
+ self._eta_queue = PriorityQueue()
+ self.sequential = sequential
+
+ def __len__(self):
+ return len(self._eta_queue) + len(self._queue)
+
+ def __contains__(self, o):
+ return o in self._eta_queue or o in self._queue
+
+ def add(self, job):
+ if job.eta:
+ self._eta_queue.add(job)
+ else:
+ self._queue.add(job)
+
+ def remove(self, job):
+ self._eta_queue.remove(job)
+ self._queue.remove(job)
+
+ def pop(self, now):
+ while len(self._eta_queue) and self._eta_queue[0].eta <= now:
+ eta_job = self._eta_queue.pop()
+ eta_job.eta = None
+ self._queue.add(eta_job)
+ if self.sequential and len(self._eta_queue) and len(self._queue):
+ eta_job = self._eta_queue[0]
+ job = self._queue[0]
+ if eta_job.cmp_no_eta(job) < 0:
+ # eta ignored, the job with eta has higher priority
+ # than the job without eta; since it's a sequential
+ # queue we wait until eta
+ return
+ return self._queue.pop()
+
+ def get_wakeup_time(self, wakeup_time=0):
+ if len(self._eta_queue):
+ if not wakeup_time:
+ wakeup_time = self._eta_queue[0].eta
+ else:
+ wakeup_time = min(wakeup_time, self._eta_queue[0].eta)
+ return wakeup_time
+
+
+class Channel(object):
+ """A channel for jobs, with a maximum capacity.
+
+ When jobs are created by queue_job modules, they may be associated
+ to a job channel. Jobs with no channel are inserted into the root channel.
+
+ Job channels are joined in a hierarchy down to the root channel.
+ When a job channel has available capacity, jobs are dequeued, marked
+ as running in the channel and are inserted into the queue of the
+ parent channel where they wait for available capacity and so on.
+
+ Job channels can be visualized as water channels with a given flow
+ limit (= capacity). Channels are joined together in a downstream channel
+ and the flow limit of the downstream channel limits upstream channels.::
+
+ ---------------------+
+ |
+ |
+ Ch. A C:4,Q:12,R:4 +-----------------------
+
+ ---------------------+ Ch. root C:5,Q:0,R:4
+ |
+ ---------------------+
+ Ch. B C:1,Q:0,R:0
+ ---------------------+-----------------------
+
+ The above diagram illustrates two channels joining in the root channel.
+ The root channel has a capacity of 5, and 4 running jobs coming from
+ Channel A. Channel A has a capacity of 4, all in use (passed down to the
+ root channel), and 12 jobs enqueued. Channel B has a capacity of 1,
+ none in use. This means that whenever a new job comes in channel B,
+ there will be available room for it to run in the root channel.
+
+ Note that from the point of view of a channel, 'running' means enqueued
+ in the downstream channel. Only jobs marked running in the root channel
+ are actually sent to Odoo for execution.
+
+ Should a downstream channel have less capacity than its upstream channels,
+ jobs going downstream will be enqueued in the downstream channel,
+ and compete normally according to their properties (priority, etc).
+
+ Using this technique, it is possible to enforce sequence in a channel
+ with a capacity of 1. It is also possible to dedicate a channel with a
+ limited capacity for application-autocreated subchannels
+ without the risk of overflowing the system.
+ """
+
+ def __init__(self, name, parent, capacity=None, sequential=False,
+ throttle=0):
+ self.name = name
+ self.parent = parent
+ if self.parent:
+ self.parent.children[name] = self
+ self.children = {}
+ self._queue = ChannelQueue()
+ self._running = SafeSet()
+ self._failed = SafeSet()
+ self._pause_until = 0 # utc seconds since the epoch
+ self.capacity = capacity
+ self.throttle = throttle # seconds
+ self.sequential = sequential
+
+ @property
+ def sequential(self):
+ return self._queue.sequential
+
+ @sequential.setter
+ def sequential(self, val):
+ self._queue.sequential = val
+
+ def configure(self, config):
+ """ Configure a channel from a dictionary.
+
+ Supported keys are:
+
+ * capacity
+ * sequential
+ * throttle
+ """
+ assert self.fullname.endswith(config['name'])
+ self.capacity = config.get('capacity', None)
+ self.sequential = bool(config.get('sequential', False))
+ self.throttle = int(config.get('throttle', 0))
+ if self.sequential and self.capacity != 1:
+ raise ValueError("A sequential channel must have a capacity of 1")
+
+ @property
+ def fullname(self):
+ """ The full name of the channel, in dot separated notation. """
+ if self.parent:
+ return self.parent.fullname + '.' + self.name
+ else:
+ return self.name
+
+ def get_subchannel_by_name(self, subchannel_name):
+ return self.children.get(subchannel_name)
+
+ def __str__(self):
+ capacity = u'∞' if self.capacity is None else str(self.capacity)
+ return "%s(C:%s,Q:%d,R:%d,F:%d)" % (self.fullname,
+ capacity,
+ len(self._queue),
+ len(self._running),
+ len(self._failed))
+
+ def remove(self, job):
+ """ Remove a job from the channel. """
+ self._queue.remove(job)
+ self._running.remove(job)
+ self._failed.remove(job)
+ if self.parent:
+ self.parent.remove(job)
+
+ def set_done(self, job):
+ """ Mark a job as done.
+
+ This removes it from the channel queue.
+ """
+ self.remove(job)
+ _logger.debug("job %s marked done in channel %s",
+ job.uuid, self)
+
+ def set_pending(self, job):
+ """ Mark a job as pending.
+
+ This puts the job in the channel queue and removes it
+ from the parent channels' queues.
+ """
+ if job not in self._queue:
+ self._queue.add(job)
+ self._running.remove(job)
+ self._failed.remove(job)
+ if self.parent:
+ self.parent.remove(job)
+ _logger.debug("job %s marked pending in channel %s",
+ job.uuid, self)
+
+ def set_running(self, job):
+ """ Mark a job as running.
+
+ This also marks the job as running in parent channels.
+ """
+ if job not in self._running:
+ self._queue.remove(job)
+ self._running.add(job)
+ self._failed.remove(job)
+ if self.parent:
+ self.parent.set_running(job)
+ _logger.debug("job %s marked running in channel %s",
+ job.uuid, self)
+
+ def set_failed(self, job):
+ """ Mark the job as failed. """
+ if job not in self._failed:
+ self._queue.remove(job)
+ self._running.remove(job)
+ self._failed.add(job)
+ if self.parent:
+ self.parent.remove(job)
+ _logger.debug("job %s marked failed in channel %s",
+ job.uuid, self)
+
+ def has_capacity(self):
+ if self.sequential and self._failed:
+ # a sequential queue blocks on failed jobs
+ return False
+ if not self.capacity:
+ # unlimited capacity
+ return True
+ return len(self._running) < self.capacity
+
+ def get_jobs_to_run(self, now):
+ """ Get jobs that are ready to run in channel.
+
+ This works by enqueuing jobs that are ready to run in children
+ channels, then yielding jobs from the channel queue until
+ ``capacity`` jobs are marked running in the channel.
+
+ If the ``throttle`` option is set on the channel, then it yields
+ no job until at least throttle seconds have elapsed since the previous
+ yield.
+
+ :param now: the current datetime in seconds
+
+ :return: iterator of
+ :class:`odoo.addons.queue_job.jobrunner.ChannelJob`
+ """
+ # enqueue jobs of children channels
+ for child in self.children.values():
+ for job in child.get_jobs_to_run(now):
+ self._queue.add(job)
+ # is this channel paused?
+ if self.throttle and self._pause_until:
+ if now < self._pause_until:
+ if self.has_capacity():
+ _logger.debug("channel %s paused until %s because "
+ "of throttle delay between jobs",
+ self, self._pause_until)
+ return
+ else:
+ # unpause, this is important to avoid perpetual wakeup
+ # while the channel is at full capacity
+ self._pause_until = 0
+ _logger.debug("channel %s unpaused at %s", self, now)
+ # yield jobs that are ready to run, while we have capacity
+ while self.has_capacity():
+ job = self._queue.pop(now)
+ if not job:
+ return
+ self._running.add(job)
+ _logger.debug("job %s marked running in channel %s",
+ job.uuid, self)
+ yield job
+ if self.throttle:
+ self._pause_until = now + self.throttle
+ _logger.debug("pausing channel %s until %s",
+ self, self._pause_until)
+ return
+
+ def get_wakeup_time(self, wakeup_time=0):
+ if not self.has_capacity():
+ # this channel is full, do not request timed wakeup, as
+ # a notification will wakeup the runner when a job finishes
+ return wakeup_time
+ if self._pause_until:
+ # this channel is paused, request wakeup at the end of the pause
+ if not wakeup_time:
+ wakeup_time = self._pause_until
+ else:
+ wakeup_time = min(wakeup_time, self._pause_until)
+ # since this channel is paused, no need to look at the
+ # wakeup time of children nor eta jobs, as such jobs would not
+ # run anyway because they would end up in this paused channel
+ return wakeup_time
+ wakeup_time = self._queue.get_wakeup_time(wakeup_time)
+ for child in self.children.values():
+ wakeup_time = child.get_wakeup_time(wakeup_time)
+ return wakeup_time
+
+
+def split_strip(s, sep, maxsplit=-1):
+ """Split string and strip each component.
+
+ >>> split_strip("foo: bar baz\\n: fred:", ":")
+ ['foo', 'bar baz', 'fred', '']
+ """
+ return [x.strip() for x in s.split(sep, maxsplit)]
+
+
+class ChannelManager(object):
+ """ High level interface for channels
+
+ This class handles:
+
+ * configuration of channels
+ * high level api to create and remove jobs (notify, remove_job, remove_db)
+ * get jobs to run
+
+ Here is how the runner will use it.
+
+ Let's create a channel manager and configure it.
+
+ >>> from pprint import pprint as pp
+ >>> cm = ChannelManager()
+ >>> cm.simple_configure('root:4,A:4,B:1')
+ >>> db = 'db'
+
+ Add a few jobs in channel A with priority 10
+
+ >>> cm.notify(db, 'A', 'A1', 1, 0, 10, None, 'pending')
+ >>> cm.notify(db, 'A', 'A2', 2, 0, 10, None, 'pending')
+ >>> cm.notify(db, 'A', 'A3', 3, 0, 10, None, 'pending')
+ >>> cm.notify(db, 'A', 'A4', 4, 0, 10, None, 'pending')
+ >>> cm.notify(db, 'A', 'A5', 5, 0, 10, None, 'pending')
+ >>> cm.notify(db, 'A', 'A6', 6, 0, 10, None, 'pending')
+
+ Add a few jobs in channel B with priority 5
+
+ >>> cm.notify(db, 'B', 'B1', 1, 0, 5, None, 'pending')
+ >>> cm.notify(db, 'B', 'B2', 2, 0, 5, None, 'pending')
+
+ We must now run one job from queue B which has a capacity of 1
+ and 3 jobs from queue A so the root channel capacity of 4 is filled.
+
+ >>> pp(list(cm.get_jobs_to_run(now=100)))
+ [<ChannelJob B1>, <ChannelJob A1>, <ChannelJob A2>, <ChannelJob A3>]
+
+ Job A2 is done. Next job to run is A4, even if we have a
+ higher priority job in channel B, because channel B has a capacity of 1.
+
+ >>> cm.notify(db, 'A', 'A2', 2, 0, 10, None, 'done')
+ >>> pp(list(cm.get_jobs_to_run(now=100)))
+ [<ChannelJob A4>]
+
+ Job B1 is done. Next job to run is B2 because it has higher priority.
+
+ >>> cm.notify(db, 'B', 'B1', 1, 0, 5, None, 'done')
+ >>> pp(list(cm.get_jobs_to_run(now=100)))
+ [<ChannelJob B2>]
+
+ Let's say A1 is done and A6 gets a higher priority. A6 will run next.
+
+ >>> cm.notify(db, 'A', 'A1', 1, 0, 10, None, 'done')
+ >>> cm.notify(db, 'A', 'A6', 6, 0, 5, None, 'pending')
+ >>> pp(list(cm.get_jobs_to_run(now=100)))
+ [<ChannelJob A6>]
+
+ Let's test the throttling mechanism. Configure a 2-second delay
+ on channel A, and enqueue two jobs.
+
+ >>> cm = ChannelManager()
+ >>> cm.simple_configure('root:4,A:4:throttle=2')
+ >>> cm.notify(db, 'A', 'A1', 1, 0, 10, None, 'pending')
+ >>> cm.notify(db, 'A', 'A2', 2, 0, 10, None, 'pending')
+
+ We have only one job to run, because of the throttle.
+
+ >>> pp(list(cm.get_jobs_to_run(now=100)))
+ [<ChannelJob A1>]
+ >>> cm.get_wakeup_time()
+ 102
+
+ We have no job to run, because of the throttle.
+
+ >>> pp(list(cm.get_jobs_to_run(now=101)))
+ []
+ >>> cm.get_wakeup_time()
+ 102
+
+ 2 seconds later, we can run the other job (even though the first one
+ is still running, because we have enough capacity).
+
+ >>> pp(list(cm.get_jobs_to_run(now=102)))
+ [<ChannelJob A2>]
+ >>> cm.get_wakeup_time()
+ 104
+
+ Let's test throttling in combination with a queue reaching full capacity.
+
+ >>> cm = ChannelManager()
+ >>> cm.simple_configure('root:4,T:2:throttle=2')
+ >>> cm.notify(db, 'T', 'T1', 1, 0, 10, None, 'pending')
+ >>> cm.notify(db, 'T', 'T2', 2, 0, 10, None, 'pending')
+ >>> cm.notify(db, 'T', 'T3', 3, 0, 10, None, 'pending')
+
+ >>> pp(list(cm.get_jobs_to_run(now=100)))
+ [<ChannelJob T1>]
+ >>> pp(list(cm.get_jobs_to_run(now=102)))
+ [<ChannelJob T2>]
+
+ The channel is now full, so there is no job to run even though the
+ throttling delay is over.
+
+ >>> pp(list(cm.get_jobs_to_run(now=103)))
+ []
+ >>> cm.get_wakeup_time() # no wakeup time, since queue is full
+ 0
+ >>> pp(list(cm.get_jobs_to_run(now=104)))
+ []
+ >>> cm.get_wakeup_time() # queue is still full
+ 0
+
+ >>> cm.notify(db, 'T', 'T1', 1, 0, 10, None, 'done')
+ >>> pp(list(cm.get_jobs_to_run(now=105)))
+ [<ChannelJob T3>]
+ >>> cm.get_wakeup_time() # queue is full
+ 0
+ >>> cm.notify(db, 'T', 'T2', 1, 0, 10, None, 'done')
+ >>> cm.get_wakeup_time()
+ 107
+
+ Test wakeup time behaviour in presence of eta.
+
+ >>> cm = ChannelManager()
+ >>> cm.simple_configure('root:4,E:1')
+ >>> cm.notify(db, 'E', 'E1', 1, 0, 10, None, 'pending')
+ >>> cm.notify(db, 'E', 'E2', 2, 0, 10, None, 'pending')
+ >>> cm.notify(db, 'E', 'E3', 3, 0, 10, None, 'pending')
+
+ >>> pp(list(cm.get_jobs_to_run(now=100)))
+ [<ChannelJob E1>]
+ >>> pp(list(cm.get_jobs_to_run(now=101)))
+ []
+ >>> cm.notify(db, 'E', 'E1', 1, 0, 10, 105, 'pending')
+ >>> cm.get_wakeup_time() # wakeup at eta
+ 105
+ >>> pp(list(cm.get_jobs_to_run(now=102))) # but there is capacity
+ [<ChannelJob E2>]
+ >>> pp(list(cm.get_jobs_to_run(now=106))) # no capacity anymore
+ []
+ >>> cm.get_wakeup_time() # no timed wakeup because no capacity
+ 0
+ >>> cm.notify(db, 'E', 'E2', 1, 0, 10, None, 'done')
+ >>> cm.get_wakeup_time()
+ 105
+ >>> pp(list(cm.get_jobs_to_run(now=107))) # eta reached, capacity free
+ [<ChannelJob E1>]
+ >>> cm.get_wakeup_time()
+ 0
+
+ Test wakeup time behaviour in a sequential queue.
+
+ >>> cm = ChannelManager()
+ >>> cm.simple_configure('root:4,S:1:sequential')
+ >>> cm.notify(db, 'S', 'S1', 1, 0, 10, None, 'pending')
+ >>> cm.notify(db, 'S', 'S2', 2, 0, 10, None, 'pending')
+ >>> cm.notify(db, 'S', 'S3', 3, 0, 10, None, 'pending')
+
+ >>> pp(list(cm.get_jobs_to_run(now=100)))
+ [<ChannelJob S1>]
+ >>> cm.notify(db, 'S', 'S1', 1, 0, 10, None, 'failed')
+ >>> pp(list(cm.get_jobs_to_run(now=101)))
+ []
+ >>> cm.notify(db, 'S', 'S2', 2, 0, 10, 105, 'pending')
+ >>> pp(list(cm.get_jobs_to_run(now=102)))
+ []
+
+ No wakeup time despite the eta, because the sequential queue
+ is blocked waiting for a failed job.
+
+ >>> cm.get_wakeup_time()
+ 0
+ >>> cm.notify(db, 'S', 'S1', 1, 0, 10, None, 'pending')
+ >>> cm.get_wakeup_time()
+ 105
+ >>> pp(list(cm.get_jobs_to_run(now=102)))
+ [<ChannelJob S1>]
+ >>> pp(list(cm.get_jobs_to_run(now=103)))
+ []
+ >>> cm.notify(db, 'S', 'S1', 1, 0, 10, None, 'done')
+
+ At this stage, we have S2 with an eta of 105 and since the
+ queue is sequential, we wait for it.
+
+ >>> pp(list(cm.get_jobs_to_run(now=103)))
+ []
+ >>> pp(list(cm.get_jobs_to_run(now=105)))
+ [<ChannelJob S2>]
+ >>> cm.notify(db, 'S', 'S2', 2, 0, 10, 105, 'done')
+ >>> pp(list(cm.get_jobs_to_run(now=105)))
+ [<ChannelJob S3>]
+ >>> cm.notify(db, 'S', 'S3', 3, 0, 10, None, 'done')
+ >>> pp(list(cm.get_jobs_to_run(now=105)))
+ []
+
+ """
+
+ def __init__(self):
+ self._jobs_by_uuid = WeakValueDictionary()
+ self._root_channel = Channel(name='root', parent=None, capacity=1)
+ self._channels_by_name = WeakValueDictionary(root=self._root_channel)
+
+ @classmethod
+ def parse_simple_config(cls, config_string):
+ """Parse a simple channels configuration string.
+
+ The general form is as follows:
+ channel(.subchannel)*(:capacity(:key(=value)?)*)? [, ...]
+
+ If capacity is absent, it defaults to 1.
+ If a key is present without value, it gets True as value.
+ When declaring subchannels, the root channel may be omitted
+ (i.e. sub:4 is the same as root.sub:4).
+
+ Returns a list of channel configuration dictionaries.
+
+ >>> from pprint import pprint as pp
+ >>> pp(ChannelManager.parse_simple_config('root:4'))
+ [{'capacity': 4, 'name': 'root'}]
+ >>> pp(ChannelManager.parse_simple_config('root:4,root.sub:2'))
+ [{'capacity': 4, 'name': 'root'}, {'capacity': 2, 'name': 'root.sub'}]
+ >>> pp(ChannelManager.parse_simple_config('root:4,root.sub:2:'
+ ... 'sequential:k=v'))
+ [{'capacity': 4, 'name': 'root'},
+ {'capacity': 2, 'k': 'v', 'name': 'root.sub', 'sequential': True}]
+ >>> pp(ChannelManager.parse_simple_config('root'))
+ [{'capacity': 1, 'name': 'root'}]
+ >>> pp(ChannelManager.parse_simple_config('sub:2'))
+ [{'capacity': 2, 'name': 'sub'}]
+
+ It ignores whitespace around values, and drops empty entries which
+ would be generated by trailing commas, or commented lines in the Odoo
+ config file.
+
+ >>> pp(ChannelManager.parse_simple_config('''
+ ... root : 4,
+ ... ,
+ ... foo bar:1: k=va lue,
+ ... '''))
+ [{'capacity': 4, 'name': 'root'},
+ {'capacity': 1, 'k': 'va lue', 'name': 'foo bar'}]
+
+ It's also possible to replace commas with line breaks, which is more
+ readable if the channel configuration comes from the odoo config file.
+
+ >>> pp(ChannelManager.parse_simple_config('''
+ ... root : 4
+ ... foo bar:1: k=va lue
+ ... baz
+ ... '''))
+ [{'capacity': 4, 'name': 'root'},
+ {'capacity': 1, 'k': 'va lue', 'name': 'foo bar'},
+ {'capacity': 1, 'name': 'baz'}]
+ """
+ res = []
+ config_string = config_string.replace("\n", ",")
+ for channel_config_string in split_strip(config_string, ','):
+ if not channel_config_string:
+ # ignore empty entries (commented lines, trailing commas)
+ continue
+ config = {}
+ config_items = split_strip(channel_config_string, ':')
+ name = config_items[0]
+ if not name:
+ raise ValueError('Invalid channel config %s: '
+ 'missing channel name' % config_string)
+ config['name'] = name
+ if len(config_items) > 1:
+ capacity = config_items[1]
+ try:
+ config['capacity'] = int(capacity)
+ except:
+ raise ValueError('Invalid channel config %s: '
+ 'invalid capacity %s' %
+ (config_string, capacity))
+ for config_item in config_items[2:]:
+ kv = split_strip(config_item, '=')
+ if len(kv) == 1:
+ k, v = kv[0], True
+ elif len(kv) == 2:
+ k, v = kv
+ else:
+ raise ValueError('Invalid channel config %s: '
+ 'incorrect config item %s' %
+ (config_string, config_item))
+ if k in config:
+ raise ValueError('Invalid channel config %s: '
+ 'duplicate key %s' %
+ (config_string, k))
+ config[k] = v
+ else:
+ config['capacity'] = 1
+ res.append(config)
+ return res
+
+ def simple_configure(self, config_string):
+ """Configure the channel manager from a simple configuration string
+
+ >>> cm = ChannelManager()
+ >>> c = cm.get_channel_by_name('root')
+ >>> c.capacity
+ 1
+ >>> cm.simple_configure('root:4,autosub.sub:2,seq:1:sequential')
+ >>> cm.get_channel_by_name('root').capacity
+ 4
+ >>> cm.get_channel_by_name('root').sequential
+ False
+ >>> cm.get_channel_by_name('root.autosub').capacity
+ >>> cm.get_channel_by_name('root.autosub.sub').capacity
+ 2
+ >>> cm.get_channel_by_name('root.autosub.sub').sequential
+ False
+ >>> cm.get_channel_by_name('autosub.sub').capacity
+ 2
+ >>> cm.get_channel_by_name('seq').capacity
+ 1
+ >>> cm.get_channel_by_name('seq').sequential
+ True
+ """
+ for config in ChannelManager.parse_simple_config(config_string):
+ self.get_channel_from_config(config)
+
+ def get_channel_from_config(self, config):
+ """Return a Channel object from a parsed configuration.
+
+ If the channel does not exist it is created.
+ The configuration is applied on the channel before returning it.
+ If some of the parent channels are missing when creating a subchannel,
+ the parent channels are auto created with an infinite capacity
+ (except for the root channel, which defaults to a capacity of 1
+ when not configured explicitly).
+ """
+ channel = self.get_channel_by_name(config['name'], autocreate=True)
+ channel.configure(config)
+ _logger.info("Configured channel: %s", channel)
+ return channel
+
+ def get_channel_by_name(self, channel_name, autocreate=False):
+ """Return a Channel object by its name.
+
+ If it does not exist and autocreate is True, it is created
+ with a default configuration and inserted in the Channels structure.
+ If autocreate is False and the channel does not exist, an exception
+ is raised.
+
+ >>> cm = ChannelManager()
+ >>> c = cm.get_channel_by_name('root', autocreate=False)
+ >>> c.name
+ 'root'
+ >>> c.fullname
+ 'root'
+ >>> c = cm.get_channel_by_name('root.sub', autocreate=True)
+ >>> c.name
+ 'sub'
+ >>> c.fullname
+ 'root.sub'
+ >>> c = cm.get_channel_by_name('sub', autocreate=True)
+ >>> c.name
+ 'sub'
+ >>> c.fullname
+ 'root.sub'
+ >>> c = cm.get_channel_by_name('autosub.sub', autocreate=True)
+ >>> c.name
+ 'sub'
+ >>> c.fullname
+ 'root.autosub.sub'
+ >>> c = cm.get_channel_by_name(None)
+ >>> c.fullname
+ 'root'
+ >>> c = cm.get_channel_by_name('root.sub')
+ >>> c.fullname
+ 'root.sub'
+ >>> c = cm.get_channel_by_name('sub')
+ >>> c.fullname
+ 'root.sub'
+ """
+ if not channel_name or channel_name == self._root_channel.name:
+ return self._root_channel
+ if not channel_name.startswith(self._root_channel.name + '.'):
+ channel_name = self._root_channel.name + '.' + channel_name
+ if channel_name in self._channels_by_name:
+ return self._channels_by_name[channel_name]
+ if not autocreate:
+ raise ChannelNotFound('Channel %s not found' % channel_name)
+ parent = self._root_channel
+ for subchannel_name in channel_name.split('.')[1:]:
+ subchannel = parent.get_subchannel_by_name(subchannel_name)
+ if not subchannel:
+ subchannel = Channel(subchannel_name, parent, capacity=None)
+ self._channels_by_name[subchannel.fullname] = subchannel
+ parent = subchannel
+ return parent
+
+ def notify(self, db_name, channel_name, uuid,
+ seq, date_created, priority, eta, state):
+ try:
+ channel = self.get_channel_by_name(channel_name)
+ except ChannelNotFound:
+ _logger.warning('unknown channel %s, '
+ 'using root channel for job %s',
+ channel_name, uuid)
+ channel = self._root_channel
+ job = self._jobs_by_uuid.get(uuid)
+ if job:
+ # db_name is invariant
+ assert job.db_name == db_name
+ # date_created is invariant
+ assert job.date_created == date_created
+ # if one of the job properties that influence
+ # scheduling order has changed, we remove the job
+ # from the queues and create a new job object
+ if (seq != job.seq or
+ priority != job.priority or
+ eta != job.eta or
+ channel != job.channel):
+ _logger.debug("job %s properties changed, rescheduling it",
+ uuid)
+ self.remove_job(uuid)
+ job = None
+ if not job:
+ job = ChannelJob(db_name, channel, uuid,
+ seq, date_created, priority, eta)
+ self._jobs_by_uuid[uuid] = job
+ # state transitions
+ if not state or state == DONE:
+ job.channel.set_done(job)
+ elif state == PENDING:
+ job.channel.set_pending(job)
+ elif state in (ENQUEUED, STARTED):
+ job.channel.set_running(job)
+ elif state == FAILED:
+ job.channel.set_failed(job)
+ else:
+ _logger.error("unexpected state %s for job %s", state, job)
+
+ def remove_job(self, uuid):
+ job = self._jobs_by_uuid.get(uuid)
+ if job:
+ job.channel.remove(job)
+ del self._jobs_by_uuid[job.uuid]
+
+ def remove_db(self, db_name):
+ for job in self._jobs_by_uuid.values():
+ if job.db_name == db_name:
+ job.channel.remove(job)
+ del self._jobs_by_uuid[job.uuid]
+
+ def get_jobs_to_run(self, now):
+ return self._root_channel.get_jobs_to_run(now)
+
+ def get_wakeup_time(self):
+ return self._root_channel.get_wakeup_time()
diff --git a/queue_job/jobrunner/runner.py b/queue_job/jobrunner/runner.py
new file mode 100644
index 0000000000..0270886003
--- /dev/null
+++ b/queue_job/jobrunner/runner.py
@@ -0,0 +1,403 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2015-2016 ACSONE SA/NV (<http://acsone.eu>)
+# Copyright 2015-2016 Camptocamp SA
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
+"""
+What is the job runner?
+-----------------------
+The job runner is the main process managing the dispatch of delayed jobs to
+available Odoo workers.
+
+How does it work?
+-----------------
+
+* It starts as a thread in the Odoo main process
+* It receives postgres NOTIFY messages each time jobs are
+ added or updated in the queue_job table.
+* It maintains an in-memory priority queue of jobs that
+ is populated from the queue_job tables in all databases.
+* It does not run jobs itself, but asks Odoo to run them through an
+ anonymous ``/queue_job/runjob`` HTTP request. [1]_
+
+How to use it?
+--------------
+
+* Optionally adjust your configuration through environment variables:
+
+ - set ``ODOO_QUEUE_JOB_CHANNELS=root:4`` (or any other channels
+ configuration) if you don't want the default ``root:1``.
+
+ - if ``xmlrpc-port`` is not set, you can set it for the jobrunner only with:
+ ``ODOO_QUEUE_JOB_PORT=8069``.
+
+* Alternatively, configure the channels through the Odoo configuration
+ file, like:
+
+.. code-block:: ini
+
+ [queue_job]
+ channels = root:4
+
+* Or, if using ``anybox.recipe.odoo``, add this to your buildout configuration:
+
+.. code-block:: ini
+
+ [odoo]
+ recipe = anybox.recipe.odoo
+ (...)
+ queue_job.channels = root:4
+
+* Start Odoo with ``--load=web,web_kanban,queue_job``
+ and ``--workers`` greater than 1 [2]_, or set the ``server_wide_modules``
+ option in the Odoo configuration file:
+
+.. code-block:: ini
+
+ [options]
+ (...)
+ workers = 4
+ server_wide_modules = web,web_kanban,queue_job
+ (...)
+
+* Or, if using ``anybox.recipe.odoo``:
+
+.. code-block:: ini
+
+ [odoo]
+ recipe = anybox.recipe.odoo
+ (...)
+ options.workers = 4
+ options.server_wide_modules = web,web_kanban,queue_job
+
+* Confirm the runner is starting correctly by checking the odoo log file:
+
+.. code-block:: none
+
+ ...INFO...queue_job.jobrunner.runner: starting
+ ...INFO...queue_job.jobrunner.runner: initializing database connections
+ ...INFO...queue_job.jobrunner.runner: queue job runner ready for db <dbname>
+ ...INFO...queue_job.jobrunner.runner: database connections ready
+
+* Create jobs (e.g. using base_import_async) and observe that they
+ start immediately and in parallel.
+
+* Tip: to enable debug logging for the queue job, use
+ ``--log-handler=odoo.addons.queue_job:DEBUG``
+
+Caveat
+------
+
+* After creating a new database or installing queue_job on an
+ existing database, Odoo must be restarted for the runner to detect it.
+
+* When Odoo shuts down normally, it waits for running jobs to finish.
+ However, when the Odoo server crashes or is otherwise force-stopped,
+ running jobs are interrupted while the runner has no chance to know
+ they have been aborted. In such situations, jobs may remain in
+ ``started`` or ``enqueued`` state after the Odoo server is halted.
+ Since the runner has no way to know if they are actually running or
+ not, and does not know for sure if it is safe to restart the jobs,
+ it does not attempt to restart them automatically. Such stale jobs
+ therefore fill the running queue and prevent other jobs from starting.
+ You must therefore requeue them manually, either from the Jobs view,
+ or by running the following SQL statement *before starting Odoo*:
+
+.. code-block:: sql
+
+ update queue_job set state='pending' where state in ('started', 'enqueued')
+
+.. rubric:: Footnotes
+
+.. [1] From a security standpoint, it is safe to have an anonymous HTTP
+ request because this request will only run jobs that are already
+ enqueued.
+.. [2] It works with the threaded Odoo server too, although this way
+ of running Odoo is obviously not for production purposes.
+"""
+
+from contextlib import closing
+import datetime
+import logging
+import os
+import select
+import threading
+import time
+
+import psycopg2
+from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
+import requests
+
+import odoo
+from odoo.tools import config
+
+from .channels import ChannelManager, PENDING, ENQUEUED, NOT_DONE
+
+SELECT_TIMEOUT = 60
+ERROR_RECOVERY_DELAY = 5
+
+_logger = logging.getLogger(__name__)
+
+
+# Unfortunately, it is not possible to extend the Odoo
+# server command line arguments, so we resort to environment variables
+# to configure the runner (channels mostly).
+#
+# On the other hand, the odoo configuration file can be extended at will,
+# so we check it in addition to the environment variables.
+
+
+def _channels():
+ return (
+ os.environ.get('ODOO_QUEUE_JOB_CHANNELS') or
+ config.misc.get("queue_job", {}).get("channels") or
+ "root:1"
+ )
+
+
+def _datetime_to_epoch(dt):
+ # important: this must return the same as postgresql
+ # EXTRACT(EPOCH FROM TIMESTAMP dt)
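+ # e.g. _datetime_to_epoch(datetime.datetime(1970, 1, 2)) == 86400.0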
+ return (dt - datetime.datetime(1970, 1, 1)).total_seconds()
+
+
+def _odoo_now():
+ dt = datetime.datetime.utcnow()
+ return _datetime_to_epoch(dt)
+
+
+def _async_http_get(port, db_name, job_uuid):
+ # Method to set failed job (due to timeout, etc) as pending,
+ # to avoid keeping it as enqueued.
+ def set_job_pending():
+ connection_info = odoo.sql_db.connection_info_for(db_name)[1]
+ conn = psycopg2.connect(**connection_info)
+ conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
+ with closing(conn.cursor()) as cr:
+ cr.execute(
+ "UPDATE queue_job SET state=%s, "
+ "date_enqueued=NULL, date_started=NULL "
+ "WHERE uuid=%s and state=%s", (PENDING, job_uuid, ENQUEUED)
+ )
+
+ # TODO: better way to HTTP GET asynchronously (grequest, ...)?
+ # if this was python3 I would be doing this with
+ # asyncio, aiohttp and aiopg
+ def urlopen():
+ url = ('http://localhost:%s/queue_job/runjob?db=%s&job_uuid=%s' %
+ (port, db_name, job_uuid))
+ try:
+ # we are not interested in the result, so we set a short timeout
+ # but not too short so we trap and log hard configuration errors
+ requests.get(url, timeout=1)
+ except requests.Timeout:
+ set_job_pending()
+ except:
+ _logger.exception("exception in GET %s", url)
+ set_job_pending()
+ thread = threading.Thread(target=urlopen)
+ thread.daemon = True
+ thread.start()
+
+
+class Database(object):
+
+ def __init__(self, db_name):
+ self.db_name = db_name
+ connection_info = odoo.sql_db.connection_info_for(db_name)[1]
+ self.conn = psycopg2.connect(**connection_info)
+ self.conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
+ self.has_queue_job = self._has_queue_job()
+ if self.has_queue_job:
+ self._initialize()
+
+ def close(self):
+ try:
+ self.conn.close()
+ except:
+ pass
+ self.conn = None
+
+ def _has_queue_job(self):
+ with closing(self.conn.cursor()) as cr:
+ try:
+ cr.execute("SELECT 1 FROM ir_module_module "
+ "WHERE name=%s AND state=%s",
+ ('queue_job', 'installed'))
+ except psycopg2.ProgrammingError as err:
+ if unicode(err).startswith('relation "ir_module_module" '
+ 'does not exist'):
+ return False
+ else:
+ raise
+ return cr.fetchone()
+
+ def _initialize(self):
+ with closing(self.conn.cursor()) as cr:
+ # this is the trigger that sends notifications when jobs change
+ cr.execute("""
+ DROP TRIGGER IF EXISTS queue_job_notify ON queue_job;
+
+ CREATE OR REPLACE
+ FUNCTION queue_job_notify() RETURNS trigger AS $$
+ BEGIN
+ IF TG_OP = 'DELETE' THEN
+ IF OLD.state != 'done' THEN
+ PERFORM pg_notify('queue_job', OLD.uuid);
+ END IF;
+ ELSE
+ PERFORM pg_notify('queue_job', NEW.uuid);
+ END IF;
+ RETURN NULL;
+ END;
+ $$ LANGUAGE plpgsql;
+
+ CREATE TRIGGER queue_job_notify
+ AFTER INSERT OR UPDATE OR DELETE
+ ON queue_job
+ FOR EACH ROW EXECUTE PROCEDURE queue_job_notify();
+ """)
+ cr.execute("LISTEN queue_job")
+
+ def select_jobs(self, where, args):
+ query = ("SELECT channel, uuid, id as seq, date_created, "
+ "priority, EXTRACT(EPOCH FROM eta), state "
+ "FROM queue_job WHERE %s" %
+ (where, ))
+ with closing(self.conn.cursor()) as cr:
+ cr.execute(query, args)
+ return list(cr.fetchall())
+
+ def set_job_enqueued(self, uuid):
+ with closing(self.conn.cursor()) as cr:
+ cr.execute("UPDATE queue_job SET state=%s, "
+ "date_enqueued=date_trunc('seconds', "
+ " now() at time zone 'utc') "
+ "WHERE uuid=%s",
+ (ENQUEUED, uuid))
+
+
+class QueueJobRunner(object):
+
+ def __init__(self, port=8069, channel_config_string=None):
+ self.port = port
+ self.channel_manager = ChannelManager()
+ if channel_config_string is None:
+ channel_config_string = _channels()
+ self.channel_manager.simple_configure(channel_config_string)
+ self.db_by_name = {}
+ self._stop = False
+ self._stop_pipe = os.pipe()
+
+ def get_db_names(self):
+ if odoo.tools.config['db_name']:
+ db_names = odoo.tools.config['db_name'].split(',')
+ else:
+ db_names = odoo.service.db.exp_list(True)
+ return db_names
+
+ def close_databases(self, remove_jobs=True):
+ for db_name, db in self.db_by_name.items():
+ try:
+ if remove_jobs:
+ self.channel_manager.remove_db(db_name)
+ db.close()
+            except Exception:
+ _logger.warning('error closing database %s',
+ db_name, exc_info=True)
+ self.db_by_name = {}
+
+ def initialize_databases(self):
+ for db_name in self.get_db_names():
+ db = Database(db_name)
+ if not db.has_queue_job:
+ _logger.debug('queue_job is not installed for db %s', db_name)
+ else:
+ self.db_by_name[db_name] = db
+ for job_data in db.select_jobs('state in %s', (NOT_DONE,)):
+ self.channel_manager.notify(db_name, *job_data)
+ _logger.info('queue job runner ready for db %s', db_name)
+
+ def run_jobs(self):
+ now = _odoo_now()
+ for job in self.channel_manager.get_jobs_to_run(now):
+ if self._stop:
+ break
+ _logger.info("asking Odoo to run job %s on db %s",
+ job.uuid, job.db_name)
+ self.db_by_name[job.db_name].set_job_enqueued(job.uuid)
+ _async_http_get(self.port, job.db_name, job.uuid)
+
+ def process_notifications(self):
+ for db in self.db_by_name.values():
+ while db.conn.notifies:
+ if self._stop:
+ break
+ notification = db.conn.notifies.pop()
+ uuid = notification.payload
+ job_datas = db.select_jobs('uuid = %s', (uuid,))
+ if job_datas:
+ self.channel_manager.notify(db.db_name, *job_datas[0])
+ else:
+ self.channel_manager.remove_job(uuid)
+
+ def wait_notification(self):
+ for db in self.db_by_name.values():
+ if db.conn.notifies:
+ # something is going on in the queue, no need to wait
+ return
+ # wait for something to happen in the queue_job tables
+ # we'll select() on database connections and the stop pipe
+ conns = [db.conn for db in self.db_by_name.values()]
+ conns.append(self._stop_pipe[0])
+ # look if the channels specify a wakeup time
+ wakeup_time = self.channel_manager.get_wakeup_time()
+ if not wakeup_time:
+ # this could very well be no timeout at all, because
+ # any activity in the job queue will wake us up, but
+ # let's have a timeout anyway, just to be safe
+ timeout = SELECT_TIMEOUT
+ else:
+ timeout = wakeup_time - _odoo_now()
+        # wait for a notification or a timeout;
+        # if the timeout is negative (i.e. the wakeup time is in the
+        # past), do not wait; this should rarely happen because of how
+        # get_wakeup_time is designed; if the timeout remains a large
+        # negative number, it is most probably a bug
+ _logger.debug("select() timeout: %.2f sec", timeout)
+ if timeout > 0:
+ conns, _, _ = select.select(conns, [], [], timeout)
+ if conns and not self._stop:
+ for conn in conns:
+ conn.poll()
+
+ def stop(self):
+ _logger.info("graceful stop requested")
+ self._stop = True
+ # wakeup the select() in wait_notification
+ os.write(self._stop_pipe[1], '.')
+
+ def run(self):
+ _logger.info("starting")
+ while not self._stop:
+ # outer loop does exception recovery
+ try:
+ _logger.info("initializing database connections")
+ # TODO: how to detect new databases or databases
+ # on which queue_job is installed after server start?
+ self.initialize_databases()
+ _logger.info("database connections ready")
+ # inner loop does the normal processing
+ while not self._stop:
+ self.process_notifications()
+ self.run_jobs()
+ self.wait_notification()
+ except KeyboardInterrupt:
+ self.stop()
+            except Exception:
+ _logger.exception("exception: sleeping %ds and retrying",
+ ERROR_RECOVERY_DELAY)
+ self.close_databases()
+ time.sleep(ERROR_RECOVERY_DELAY)
+ self.close_databases(remove_jobs=False)
+ _logger.info("stopped")
diff --git a/queue_job/models/__init__.py b/queue_job/models/__init__.py
new file mode 100644
index 0000000000..e7266a7f86
--- /dev/null
+++ b/queue_job/models/__init__.py
@@ -0,0 +1,4 @@
+# -*- coding: utf-8 -*-
+
+from . import base
+from . import queue_job
diff --git a/queue_job/models/base.py b/queue_job/models/base.py
new file mode 100644
index 0000000000..37f28ac60d
--- /dev/null
+++ b/queue_job/models/base.py
@@ -0,0 +1,58 @@
+# -*- coding: utf-8 -*-
+# Copyright 2016 Camptocamp
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
+
+import inspect
+
+from odoo import models, api
+from ..job import DelayableRecordset
+
+
+class Base(models.AbstractModel):
+ """ The base model, which is implicitly inherited by all models. """
+ _inherit = 'base'
+
+ @api.model_cr
+ def _register_hook(self):
+ """ register marked jobs """
+ super(Base, self)._register_hook()
+ job_methods = [method for __, method
+ in inspect.getmembers(self, predicate=inspect.ismethod)
+ if getattr(method, 'delayable', None)]
+ for job_method in job_methods:
+ self.env['queue.job.function']._register_job(job_method)
+
+ @api.multi
+ def with_delay(self, priority=None, eta=None,
+ max_retries=None, description=None):
+ """ Return a ``DelayableRecordset``
+
+        The returned instance allows enqueuing any method of the
+        recordset's Model which is decorated by
+        :func:`~odoo.addons.queue_job.job.job`.
+
+ Usage::
+
+ self.env['res.users'].with_delay().write({'name': 'test'})
+
+        In the line above, provided ``write`` is allowed to be delayed
+        with ``@job``, the write will be executed in an asynchronous job.
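+
+        Options can be combined; for instance (the values below are
+        purely illustrative)::
+
+            delayable = self.env['res.users'].with_delay(priority=5, eta=60)
+            delayable.write({'name': 'test'})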
+
+
+        :param priority: Priority of the job, 0 being the highest priority.
+                         Default is 10.
+        :param eta: Estimated Time of Arrival of the job. It will not be
+                    executed before this date/time.
+        :param max_retries: maximum number of retries before giving up and
+                            setting the job state to 'failed'. A value of 0
+                            means infinite retries. Default is 5.
+        :param description: human description of the job. If None, the
+                            description is computed from the function's
+                            docstring or name
+ :return: instance of a DelayableRecordset
+ :rtype: :class:`odoo.addons.queue_job.job.DelayableRecordset`
+
+ """
+ return DelayableRecordset(self, priority=priority,
+ eta=eta,
+ max_retries=max_retries,
+ description=description)
diff --git a/queue_job/models/queue_job.py b/queue_job/models/queue_job.py
new file mode 100644
index 0000000000..42a479bb2f
--- /dev/null
+++ b/queue_job/models/queue_job.py
@@ -0,0 +1,344 @@
+# -*- coding: utf-8 -*-
+# Copyright 2013-2016 Camptocamp SA
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
+
+import logging
+from datetime import datetime, timedelta
+
+from odoo import models, fields, api, exceptions, _
+
+from ..job import STATES, DONE, PENDING, Job
+from ..fields import JobSerialized
+
+_logger = logging.getLogger(__name__)
+
+
+def channel_func_name(method):
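+    # builds the name stored on queue.job.function records,
+    # e.g. '<test.queue.channel>.job_a'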
+ return '<%s>.%s' % (method.im_class._name, method.__name__)
+
+
+class QueueJob(models.Model):
+ """ Job status and result """
+ _name = 'queue.job'
+ _description = 'Queue Job'
+ _inherit = ['mail.thread', 'ir.needaction_mixin']
+ _log_access = False
+
+ _order = 'date_created DESC, date_done DESC'
+
+ _removal_interval = 30 # days
+
+ uuid = fields.Char(string='UUID',
+ readonly=True,
+ index=True,
+ required=True)
+ user_id = fields.Many2one(comodel_name='res.users',
+ string='User ID',
+ required=True)
+ company_id = fields.Many2one(comodel_name='res.company',
+ string='Company', index=True)
+ name = fields.Char(string='Description', readonly=True)
+
+ model_name = fields.Char(string='Model', readonly=True)
+ method_name = fields.Char(readonly=True)
+ record_ids = fields.Serialized(readonly=True)
+ args = JobSerialized(readonly=True)
+ kwargs = JobSerialized(readonly=True)
+ func_string = fields.Char(string='Task', compute='_compute_func_string',
+ readonly=True, store=True)
+
+ state = fields.Selection(STATES,
+ string='State',
+ readonly=True,
+ required=True,
+ index=True)
+ priority = fields.Integer()
+ exc_info = fields.Text(string='Exception Info', readonly=True)
+ result = fields.Text(string='Result', readonly=True)
+
+ date_created = fields.Datetime(string='Created Date', readonly=True)
+ date_started = fields.Datetime(string='Start Date', readonly=True)
+ date_enqueued = fields.Datetime(string='Enqueue Time', readonly=True)
+ date_done = fields.Datetime(string='Date Done', readonly=True)
+
+ eta = fields.Datetime(string='Execute only after')
+ retry = fields.Integer(string='Current try')
+ max_retries = fields.Integer(
+ string='Max. retries',
+ help="The job will fail if the number of tries reach the "
+ "max. retries.\n"
+ "Retries are infinite when empty.",
+ )
+ channel_method_name = fields.Char(readonly=True,
+ compute='_compute_channel',
+ store=True)
+ job_function_id = fields.Many2one(comodel_name='queue.job.function',
+ compute='_compute_channel',
+ string='Job Function',
+ readonly=True,
+ store=True)
+ # for searching without JOIN on channels
+ channel = fields.Char(compute='_compute_channel', store=True, index=True)
+
+ @api.multi
+ @api.depends('model_name', 'method_name', 'job_function_id.channel_id')
+ def _compute_channel(self):
+ for record in self:
+ model = self.env[record.model_name]
+ method = getattr(model, record.method_name)
+ channel_method_name = channel_func_name(method)
+ func_model = self.env['queue.job.function']
+ function = func_model.search([('name', '=', channel_method_name)])
+ record.channel_method_name = channel_method_name
+ record.job_function_id = function
+ record.channel = record.job_function_id.channel
+
+ @api.multi
+ @api.depends('model_name', 'method_name', 'record_ids', 'args', 'kwargs')
+ def _compute_func_string(self):
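+        # builds a human-readable representation of the job's call,
+        # e.g. "test.queue.job(1,).testing_method('a', k=1)"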
+ for record in self:
+ record_ids = record.record_ids
+ model = repr(self.env[record.model_name].browse(record_ids))
+ args = [repr(arg) for arg in record.args]
+ kwargs = ['%s=%r' % (key, val) for key, val
+ in record.kwargs.iteritems()]
+ all_args = ', '.join(args + kwargs)
+ record.func_string = (
+ "%s.%s(%s)" % (model, record.method_name, all_args)
+ )
+
+ @api.multi
+ def open_related_action(self):
+ """ Open the related action associated to the job """
+ self.ensure_one()
+ job = Job.load(self.env, self.uuid)
+ action = job.related_action()
+ if action is None:
+ raise exceptions.Warning(_('No action available for this job'))
+ return action
+
+ @api.multi
+ def _change_job_state(self, state, result=None):
+ """ Change the state of the `Job` object itself so it
+ will change the other fields (date, result, ...)
+ """
+ for record in self:
+ job_ = Job.load(record.env, record.uuid)
+ if state == DONE:
+ job_.set_done(result=result)
+ elif state == PENDING:
+ job_.set_pending(result=result)
+ else:
+ raise ValueError('State not supported: %s' % state)
+ job_.store()
+
+ @api.multi
+ def button_done(self):
+ result = _('Manually set to done by %s') % self.env.user.name
+ self._change_job_state(DONE, result=result)
+ return True
+
+ @api.multi
+ def requeue(self):
+ self._change_job_state(PENDING)
+ return True
+
+ @api.multi
+ def write(self, vals):
+ res = super(QueueJob, self).write(vals)
+ if vals.get('state') == 'failed':
+            # subscribe the users now to avoid subscribing them
+            # at every job creation
+ self._subscribe_users()
+ for record in self:
+ msg = record._message_failed_job()
+ if msg:
+ record.message_post(body=msg,
+ subtype='queue_job.mt_job_failed')
+ return res
+
+ @api.multi
+ def _subscribe_users(self):
+ """ Subscribe all users having the 'Queue Job Manager' group """
+ group = self.env.ref('queue_job.group_queue_job_manager')
+ if not group:
+ return
+ companies = self.mapped('company_id')
+ domain = [('groups_id', '=', group.id)]
+ if companies:
+ domain.append(('company_id', 'child_of', companies.ids))
+ users = self.env['res.users'].search(domain)
+ self.message_subscribe_users(user_ids=users.ids)
+
+ @api.multi
+ def _message_failed_job(self):
+ """ Return a message which will be posted on the job when it is failed.
+
+ It can be inherited to allow more precise messages based on the
+ exception informations.
+
+ If nothing is returned, no message will be posted.
+ """
+ self.ensure_one()
+ return _("Something bad happened during the execution of the job. "
+ "More details in the 'Exception Information' section.")
+
+ @api.model
+ def _needaction_domain_get(self):
+ """ Returns the domain to filter records that require an action
+ :return: domain or False is no action
+ """
+ return [('state', '=', 'failed')]
+
+ @api.model
+ def autovacuum(self):
+ """ Delete all jobs done since more than ``_removal_interval`` days.
+
+ Called from a cron.
+ """
+ deadline = datetime.now() - timedelta(days=self._removal_interval)
+ jobs = self.search(
+ [('date_done', '<=', fields.Datetime.to_string(deadline))],
+ )
+ jobs.unlink()
+ return True
+
+
+class RequeueJob(models.TransientModel):
+ _name = 'queue.requeue.job'
+ _description = 'Wizard to requeue a selection of jobs'
+
+ @api.model
+ def _default_job_ids(self):
+ res = False
+ context = self.env.context
+ if (context.get('active_model') == 'queue.job' and
+ context.get('active_ids')):
+ res = context['active_ids']
+ return res
+
+ job_ids = fields.Many2many(comodel_name='queue.job',
+ string='Jobs',
+ default=_default_job_ids)
+
+ @api.multi
+ def requeue(self):
+ jobs = self.job_ids
+ jobs.requeue()
+ return {'type': 'ir.actions.act_window_close'}
+
+
+class JobChannel(models.Model):
+ _name = 'queue.job.channel'
+ _description = 'Job Channels'
+
+ name = fields.Char()
+ complete_name = fields.Char(compute='_compute_complete_name',
+ string='Complete Name',
+ store=True,
+ readonly=True)
+ parent_id = fields.Many2one(comodel_name='queue.job.channel',
+ string='Parent Channel',
+ ondelete='restrict')
+ job_function_ids = fields.One2many(comodel_name='queue.job.function',
+ inverse_name='channel_id',
+ string='Job Functions')
+
+ _sql_constraints = [
+ ('name_uniq',
+ 'unique(complete_name)',
+ 'Channel complete name must be unique'),
+ ]
+
+ @api.multi
+ @api.depends('name', 'parent_id.complete_name')
+ def _compute_complete_name(self):
+ for record in self:
+ channel = record
+ parts = [channel.name]
+ while channel.parent_id:
+ channel = channel.parent_id
+ parts.append(channel.name)
+ record.complete_name = '.'.join(reversed(parts))
+
+ @api.multi
+ @api.constrains('parent_id', 'name')
+ def parent_required(self):
+ for record in self:
+ if record.name != 'root' and not record.parent_id:
+ raise exceptions.ValidationError(_('Parent channel required.'))
+
+ @api.multi
+ def write(self, values):
+ for channel in self:
+ if (not self.env.context.get('install_mode') and
+ channel.name == 'root' and
+ ('name' in values or 'parent_id' in values)):
+ raise exceptions.Warning(_('Cannot change the root channel'))
+ return super(JobChannel, self).write(values)
+
+ @api.multi
+ def unlink(self):
+ for channel in self:
+ if channel.name == 'root':
+ raise exceptions.Warning(_('Cannot remove the root channel'))
+ return super(JobChannel, self).unlink()
+
+ @api.multi
+ def name_get(self):
+ result = []
+ for record in self:
+ result.append((record.id, record.complete_name))
+ return result
+
+
+class JobFunction(models.Model):
+ _name = 'queue.job.function'
+ _description = 'Job Functions'
+ _log_access = False
+
+ @api.model
+ def _default_channel(self):
+ return self.env.ref('queue_job.channel_root')
+
+ name = fields.Char(index=True)
+ channel_id = fields.Many2one(comodel_name='queue.job.channel',
+ string='Channel',
+ required=True,
+ default=_default_channel)
+ channel = fields.Char(related='channel_id.complete_name',
+ store=True,
+ readonly=True)
+
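+    # Hypothetical example: _find_or_create_channel('root.sub.subsub')
+    # returns the 'subsub' channel, creating the intermediate 'sub'
+    # channel under root when it does not exist yet.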
+ @api.model
+ def _find_or_create_channel(self, channel_path):
+ channel_model = self.env['queue.job.channel']
+ parts = channel_path.split('.')
+ parts.reverse()
+ channel_name = parts.pop()
+        assert channel_name == 'root', "A channel path must start with 'root'"
+ # get the root channel
+ channel = channel_model.search([('name', '=', channel_name)])
+ while parts:
+ channel_name = parts.pop()
+ parent_channel = channel
+ channel = channel_model.search([
+ ('name', '=', channel_name),
+ ('parent_id', '=', parent_channel.id)],
+ limit=1,
+ )
+ if not channel:
+ channel = channel_model.create({
+ 'name': channel_name,
+ 'parent_id': parent_channel.id,
+ })
+ return channel
+
+ @api.model
+ def _register_job(self, job_method):
+ func_name = channel_func_name(job_method)
+ if not self.search_count([('name', '=', func_name)]):
+ channel = self._find_or_create_channel(job_method.default_channel)
+ self.create({'name': func_name, 'channel_id': channel.id})
diff --git a/queue_job/security/ir.model.access.csv b/queue_job/security/ir.model.access.csv
new file mode 100644
index 0000000000..e90eee9ae4
--- /dev/null
+++ b/queue_job/security/ir.model.access.csv
@@ -0,0 +1,4 @@
+id,name,model_id:id,group_id:id,perm_read,perm_write,perm_create,perm_unlink
+access_queue_job_manager,queue job manager,queue_job.model_queue_job,queue_job.group_queue_job_manager,1,1,1,1
+access_queue_job_function_manager,queue job functions manager,queue_job.model_queue_job_function,queue_job.group_queue_job_manager,1,1,1,1
+access_queue_job_channel_manager,queue job channel manager,queue_job.model_queue_job_channel,queue_job.group_queue_job_manager,1,1,1,1
diff --git a/queue_job/security/security.xml b/queue_job/security/security.xml
new file mode 100644
index 0000000000..152ac4a679
--- /dev/null
+++ b/queue_job/security/security.xml
@@ -0,0 +1,29 @@
+<?xml version="1.0" encoding="utf-8"?>
+<odoo>
+
+    <record model="ir.module.category" id="module_category_queue_job">
+        <field name="name">Job Queue</field>
+        <field name="sequence">20</field>
+    </record>
+
+    <record id="group_queue_job_manager" model="res.groups">
+        <field name="name">Job Queue Manager</field>
+        <field name="category_id" ref="module_category_queue_job"/>
+        <field name="users" eval="[(4, ref('base.user_root'))]"/>
+    </record>
+
+    <record id="queue_job_comp_rule" model="ir.rule">
+        <field name="name">Job Queue multi-company</field>
+        <field name="model_id" ref="model_queue_job"/>
+        <field name="global" eval="True"/>
+        <field name="domain_force">['|',('company_id','=',False),('company_id','child_of',[user.company_id.id])]</field>
+    </record>
+
+</odoo>
diff --git a/queue_job/static/description/icon.png b/queue_job/static/description/icon.png
new file mode 100644
index 0000000000..3a0328b516
Binary files /dev/null and b/queue_job/static/description/icon.png differ
diff --git a/queue_job/tests/__init__.py b/queue_job/tests/__init__.py
new file mode 100644
index 0000000000..471c7d634a
--- /dev/null
+++ b/queue_job/tests/__init__.py
@@ -0,0 +1,5 @@
+# -*- coding: utf-8 -*-
+
+from . import test_runner_channels
+from . import test_runner_runner
+from . import test_json_field
diff --git a/queue_job/tests/common.py b/queue_job/tests/common.py
new file mode 100644
index 0000000000..40a96afc6f
--- /dev/null
+++ b/queue_job/tests/common.py
@@ -0,0 +1 @@
+# -*- coding: utf-8 -*-
diff --git a/queue_job/tests/test_json_field.py b/queue_job/tests/test_json_field.py
new file mode 100644
index 0000000000..54370e1b63
--- /dev/null
+++ b/queue_job/tests/test_json_field.py
@@ -0,0 +1,25 @@
+# -*- coding: utf-8 -*-
+# Copyright 2016 Camptocamp
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
+
+import json
+
+from odoo.tests import common
+from odoo.addons.queue_job.fields import JobEncoder, JobDecoder
+
+
+class TestJson(common.TransactionCase):
+
+ def test_encoder(self):
+ value = ['a', 1, self.env.ref('base.user_root')]
+ value_json = json.dumps(value, cls=JobEncoder)
+ expected = ('["a", 1, {"_type": "odoo_recordset", '
+ '"model": "res.users", "ids": [1]}]')
+ self.assertEqual(value_json, expected)
+
+ def test_decoder(self):
+ value_json = ('["a", 1, {"_type": "odoo_recordset",'
+ '"model": "res.users", "ids": [1]}]')
+ expected = ['a', 1, self.env.ref('base.user_root')]
+ value = json.loads(value_json, cls=JobDecoder, env=self.env)
+ self.assertEqual(value, expected)
diff --git a/queue_job/tests/test_runner_channels.py b/queue_job/tests/test_runner_channels.py
new file mode 100644
index 0000000000..b5e8db6af7
--- /dev/null
+++ b/queue_job/tests/test_runner_channels.py
@@ -0,0 +1,11 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015-2016 Camptocamp SA
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
+
+import doctest
+from odoo.addons.queue_job.jobrunner import channels
+
+
+def load_tests(loader, tests, ignore):
+ tests.addTests(doctest.DocTestSuite(channels))
+ return tests
diff --git a/queue_job/tests/test_runner_runner.py b/queue_job/tests/test_runner_runner.py
new file mode 100644
index 0000000000..86c6ec06f4
--- /dev/null
+++ b/queue_job/tests/test_runner_runner.py
@@ -0,0 +1,11 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015-2016 Camptocamp SA
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
+
+import doctest
+from odoo.addons.queue_job.jobrunner import runner
+
+
+def load_tests(loader, tests, ignore):
+ tests.addTests(doctest.DocTestSuite(runner))
+ return tests
diff --git a/queue_job/views/queue_job_views.xml b/queue_job/views/queue_job_views.xml
new file mode 100644
index 0000000000..f814a60724
--- /dev/null
+++ b/queue_job/views/queue_job_views.xml
@@ -0,0 +1,296 @@
+
+
+
+
+ queue.job.form
+ queue.job
+
+
+
+
+
+
+ queue.job.tree
+ queue.job
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ queue.job.search
+ queue.job
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Jobs
+ queue.job
+ form
+ tree,form
+ {'search_default_pending': 1,
+ 'search_default_enqueued': 1,
+ 'search_default_started': 1,
+ 'search_default_failed': 1}
+
+
+
+
+
+
+ Requeue Jobs
+ queue.requeue.job
+
+
+
+
+
+
+ Requeue Jobs
+ queue.requeue.job
+ form
+ form
+
+ new
+
+
+
+
+ Requeue Jobs
+ client_action_multi
+
+ action
+ queue.job
+
+
+
+ queue.job.channel.form
+ queue.job.channel
+
+
+
+
+
+
+ queue.job.channel.tree
+ queue.job.channel
+
+
+
+
+
+
+
+
+ queue.job.channel.search
+ queue.job.channel
+
+
+
+
+
+
+
+
+
+
+ Channels
+ queue.job.channel
+ form
+ tree,form
+ {}
+
+
+
+
+ queue.job.function.form
+ queue.job.function
+
+
+
+
+
+
+ queue.job.function.tree
+ queue.job.function
+
+
+
+
+
+
+
+
+
+ queue.job.function.search
+ queue.job.function
+
+
+
+
+
+
+
+
+
+
+
+
+ Job Functions
+ queue.job.function
+ form
+ tree,form
+ {}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/setup/queue_job/odoo/__init__.py b/setup/queue_job/odoo/__init__.py
new file mode 100644
index 0000000000..de40ea7ca0
--- /dev/null
+++ b/setup/queue_job/odoo/__init__.py
@@ -0,0 +1 @@
+__import__('pkg_resources').declare_namespace(__name__)
diff --git a/setup/queue_job/odoo/addons/__init__.py b/setup/queue_job/odoo/addons/__init__.py
new file mode 100644
index 0000000000..de40ea7ca0
--- /dev/null
+++ b/setup/queue_job/odoo/addons/__init__.py
@@ -0,0 +1 @@
+__import__('pkg_resources').declare_namespace(__name__)
diff --git a/setup/queue_job/odoo/addons/queue_job b/setup/queue_job/odoo/addons/queue_job
new file mode 120000
index 0000000000..ac796aaa1c
--- /dev/null
+++ b/setup/queue_job/odoo/addons/queue_job
@@ -0,0 +1 @@
+../../../../queue_job
\ No newline at end of file
diff --git a/setup/queue_job/setup.py b/setup/queue_job/setup.py
new file mode 100644
index 0000000000..28c57bb640
--- /dev/null
+++ b/setup/queue_job/setup.py
@@ -0,0 +1,6 @@
+import setuptools
+
+setuptools.setup(
+ setup_requires=['setuptools-odoo'],
+ odoo_addon=True,
+)
diff --git a/test_queue_job/README.rst b/test_queue_job/README.rst
new file mode 100644
index 0000000000..96931d3c3e
--- /dev/null
+++ b/test_queue_job/README.rst
@@ -0,0 +1,70 @@
+.. image:: https://img.shields.io/badge/licence-AGPL--3-blue.svg
+ :target: http://www.gnu.org/licenses/agpl-3.0-standalone.html
+ :alt: License: AGPL-3
+
+==============
+Test Job Queue
+==============
+
+This addon is not meant to be used in production. It extends Odoo
+models in order to run automated tests on the job queue.
+
+The basic tests are integrated in the ``queue_job`` addon, but the
+tests that need several job methods live in this module, to avoid
+polluting the models of ``queue_job``.
+
+Installation
+============
+
+Nothing particular.
+
+Usage
+=====
+
+This module only contains Python tests.
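+
+They can be run with the standard Odoo test machinery, for instance
+from a source checkout (the database name below is only an example)::
+
+    ./odoo-bin -d testdb -i test_queue_job --test-enable --stop-after-init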
+
+Known issues / Roadmap
+======================
+
+Bug Tracker
+===========
+
+Bugs are tracked on `GitHub Issues
+<https://github.com/OCA/queue/issues>`_. In case of trouble, please
+check there if your issue has already been reported. If you spotted it
+first, help us smash it by providing detailed and welcome feedback.
+
+Credits
+=======
+
+Images
+------
+
+* Odoo Community Association: `Icon <https://github.com/OCA/maintainer-tools/blob/master/template/module/static/description/icon.svg>`_.
+
+Contributors
+------------
+
+* Guewen Baconnier
+* Stéphane Bidoul
+* Matthieu Dietrich
+* Jos De Graeve
+* David Lefever
+* Laurent Mignon
+* Laetitia Gangloff
+
+Maintainer
+----------
+
+.. image:: https://odoo-community.org/logo.png
+ :alt: Odoo Community Association
+ :target: https://odoo-community.org
+
+This module is maintained by the OCA.
+
+OCA, or the Odoo Community Association, is a nonprofit organization whose
+mission is to support the collaborative development of Odoo features and
+promote its widespread use.
+
+To contribute to this module, please visit https://odoo-community.org.
+
diff --git a/test_queue_job/__init__.py b/test_queue_job/__init__.py
new file mode 100644
index 0000000000..a0fdc10fe1
--- /dev/null
+++ b/test_queue_job/__init__.py
@@ -0,0 +1,2 @@
+# -*- coding: utf-8 -*-
+from . import models
diff --git a/test_queue_job/__manifest__.py b/test_queue_job/__manifest__.py
new file mode 100644
index 0000000000..9b3f850cda
--- /dev/null
+++ b/test_queue_job/__manifest__.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+# Copyright 2016 Camptocamp SA
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
+
+{'name': 'Queue Job Tests',
+ 'version': '10.0.1.0.0',
+ 'author': 'Camptocamp,Odoo Community Association (OCA)',
+ 'license': 'AGPL-3',
+ 'category': 'Generic Modules',
+ 'depends': ['queue_job',
+ ],
+ 'website': 'http://www.camptocamp.com',
+ 'data': ['security/ir.model.access.csv',
+ ],
+ 'installable': True,
+ }
diff --git a/test_queue_job/models/__init__.py b/test_queue_job/models/__init__.py
new file mode 100644
index 0000000000..153d090f53
--- /dev/null
+++ b/test_queue_job/models/__init__.py
@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+
+from . import test_models
diff --git a/test_queue_job/models/test_models.py b/test_queue_job/models/test_models.py
new file mode 100644
index 0000000000..b76dbab738
--- /dev/null
+++ b/test_queue_job/models/test_models.py
@@ -0,0 +1,118 @@
+# -*- coding: utf-8 -*-
+# Copyright 2016 Camptocamp SA
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
+
+from odoo import api, fields, models
+from odoo.addons.queue_job.job import job, related_action
+from odoo.addons.queue_job.exception import RetryableJobError
+
+
+class QueueJob(models.Model):
+
+ _inherit = 'queue.job'
+
+ @api.multi
+ def testing_related_method(self, **kwargs):
+ return self, kwargs
+
+ @api.multi
+ def testing_related__none(self, **kwargs):
+ return None
+
+ @api.multi
+ def testing_related__url(self, **kwargs):
+ assert 'url' in kwargs, "url required"
+ subject = self.args[0]
+ return {
+ 'type': 'ir.actions.act_url',
+ 'target': 'new',
+ 'url': kwargs['url'].format(subject=subject),
+ }
+
+
+class TestQueueJob(models.Model):
+
+ _name = 'test.queue.job'
+ _description = "Test model for queue.job"
+
+ name = fields.Char()
+
+ @job
+ @related_action(action='testing_related_method')
+ @api.multi
+ def testing_method(self, *args, **kwargs):
+ """ Method used for tests
+
+        Always return the arguments and keyword arguments received.
+ """
+ if kwargs.get('raise_retry'):
+ raise RetryableJobError('Must be retried later')
+ if kwargs.get('return_context'):
+ return self.env.context
+ return args, kwargs
+
+ @job
+ def no_description(self):
+ return
+
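+    # retry_pattern maps a retry count to the number of seconds to
+    # postpone the job; a retry without an exact key reuses the greatest
+    # smaller key (e.g. the 4th retry below reuses the 10s of key 3)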
+ @job(retry_pattern={1: 60, 2: 180, 3: 10, 5: 300})
+ def job_with_retry_pattern(self):
+ return
+
+ @job(retry_pattern={3: 180})
+ def job_with_retry_pattern__no_zero(self):
+ return
+
+ @job
+ def mapped(self, func):
+ return super(TestQueueJob, self).mapped(func)
+
+ @job
+ def job_alter_mutable(self, mutable_arg, mutable_kwarg=None):
+ mutable_arg.append(2)
+ mutable_kwarg['b'] = 2
+ return mutable_arg, mutable_kwarg
+
+
+class TestQueueChannel(models.Model):
+
+ _name = 'test.queue.channel'
+ _description = "Test model for queue.channel"
+
+ @job
+ def job_a(self):
+ return
+
+ @job
+ def job_b(self):
+ return
+
+ @job(default_channel='root.sub.subsub')
+ def job_sub_channel(self):
+ return
+
+
+class TestRelatedAction(models.Model):
+
+ _name = 'test.related.action'
+ _description = "Test model for related actions"
+
+ @job
+ def testing_related_action__no(self):
+ return
+
+ @job
+ @related_action() # default action returns None
+ def testing_related_action__return_none(self):
+ return
+
+ @job
+ @related_action(action='testing_related_method', b=4)
+ def testing_related_action__kwargs(self):
+ return
+
+ @job
+ @related_action(action='testing_related__url',
+ url='https://en.wikipedia.org/wiki/{subject}')
+ def testing_related_action__store(self):
+ return
diff --git a/test_queue_job/security/ir.model.access.csv b/test_queue_job/security/ir.model.access.csv
new file mode 100644
index 0000000000..dba58d51a2
--- /dev/null
+++ b/test_queue_job/security/ir.model.access.csv
@@ -0,0 +1,4 @@
+id,name,model_id:id,group_id:id,perm_read,perm_write,perm_create,perm_unlink
+access_test_queue_job,access_test_queue_job,model_test_queue_job,,1,1,1,1
+access_test_queue_channel,access_test_queue_channel,model_test_queue_channel,,1,1,1,1
+access_test_related_action,access_test_related_action,model_test_related_action,,1,1,1,1
diff --git a/test_queue_job/tests/__init__.py b/test_queue_job/tests/__init__.py
new file mode 100644
index 0000000000..63498a9eae
--- /dev/null
+++ b/test_queue_job/tests/__init__.py
@@ -0,0 +1,4 @@
+# -*- coding: utf-8 -*-
+from . import test_job
+from . import test_job_channels
+from . import test_related_actions
diff --git a/test_queue_job/tests/test_job.py b/test_queue_job/tests/test_job.py
new file mode 100644
index 0000000000..3279fc4136
--- /dev/null
+++ b/test_queue_job/tests/test_job.py
@@ -0,0 +1,617 @@
+# -*- coding: utf-8 -*-
+# Copyright 2016 Camptocamp SA
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
+
+from datetime import datetime, timedelta
+import mock
+
+from odoo import SUPERUSER_ID
+import odoo.tests.common as common
+
+from odoo.addons.queue_job.exception import (
+ FailedJobError,
+ NoSuchJobError,
+ RetryableJobError,
+)
+from odoo.addons.queue_job.job import (
+ Job,
+ RETRY_INTERVAL,
+ PENDING,
+ ENQUEUED,
+ STARTED,
+ DONE,
+ FAILED,
+)
+
+
+class TestJobsOnTestingMethod(common.TransactionCase):
+ """ Test Job """
+
+ def setUp(self):
+ super(TestJobsOnTestingMethod, self).setUp()
+ self.queue_job = self.env['queue.job']
+ self.method = self.env['test.queue.job'].testing_method
+
+ def test_new_job(self):
+ """
+ Create a job
+ """
+ test_job = Job(self.method)
+ self.assertEqual(test_job.func, self.method)
+
+ def test_eta(self):
+ """ When an `eta` is datetime, it uses it """
+ now = datetime.now()
+ method = self.env['res.users'].mapped
+ job_a = Job(method, eta=now)
+ self.assertEqual(job_a.eta, now)
+
+ def test_eta_integer(self):
+ """ When an `eta` is an integer, it adds n seconds up to now """
+ datetime_path = 'odoo.addons.queue_job.job.datetime'
+ with mock.patch(datetime_path, autospec=True) as mock_datetime:
+ mock_datetime.now.return_value = datetime(2015, 3, 15, 16, 41, 0)
+ job_a = Job(self.method, eta=60)
+ self.assertEqual(job_a.eta, datetime(2015, 3, 15, 16, 42, 0))
+
+ def test_eta_timedelta(self):
+ """ When an `eta` is a timedelta, it adds it up to now """
+ datetime_path = 'odoo.addons.queue_job.job.datetime'
+ with mock.patch(datetime_path, autospec=True) as mock_datetime:
+ mock_datetime.now.return_value = datetime(2015, 3, 15, 16, 41, 0)
+ delta = timedelta(hours=3)
+ job_a = Job(self.method, eta=delta)
+ self.assertEqual(job_a.eta, datetime(2015, 3, 15, 19, 41, 0))
+
+ def test_perform_args(self):
+ test_job = Job(self.method,
+ args=('o', 'k'),
+ kwargs={'c': '!'})
+ result = test_job.perform()
+ self.assertEqual(result, (('o', 'k'), {'c': '!'}))
+
+ def test_retryable_error(self):
+ test_job = Job(self.method,
+ kwargs={'raise_retry': True},
+ max_retries=3)
+ self.assertEqual(test_job.retry, 0)
+ with self.assertRaises(RetryableJobError):
+ test_job.perform()
+ self.assertEqual(test_job.retry, 1)
+ with self.assertRaises(RetryableJobError):
+ test_job.perform()
+ self.assertEqual(test_job.retry, 2)
+ with self.assertRaises(FailedJobError):
+ test_job.perform()
+ self.assertEqual(test_job.retry, 3)
+
+ def test_infinite_retryable_error(self):
+ test_job = Job(self.method,
+ kwargs={'raise_retry': True},
+ max_retries=0)
+ self.assertEqual(test_job.retry, 0)
+ with self.assertRaises(RetryableJobError):
+ test_job.perform()
+ self.assertEqual(test_job.retry, 1)
+
+ def test_on_instance_method(self):
+
+ class A(object):
+ def method(self):
+ pass
+
+ with self.assertRaises(TypeError):
+ Job(A.method)
+
+ def test_on_model_method(self):
+ job_ = Job(self.env['test.queue.job'].testing_method)
+ self.assertEquals(job_.model_name, 'test.queue.job')
+ self.assertEquals(job_.method_name, 'testing_method')
+
+ def test_invalid_function(self):
+ with self.assertRaises(TypeError):
+ Job(1)
+
+ def test_set_pending(self):
+ job_a = Job(self.method)
+ job_a.set_pending(result='test')
+ self.assertEquals(job_a.state, PENDING)
+ self.assertFalse(job_a.date_enqueued)
+ self.assertFalse(job_a.date_started)
+ self.assertEquals(job_a.retry, 0)
+ self.assertEquals(job_a.result, 'test')
+
+ def test_set_enqueued(self):
+ job_a = Job(self.method)
+ datetime_path = 'odoo.addons.queue_job.job.datetime'
+ with mock.patch(datetime_path, autospec=True) as mock_datetime:
+ mock_datetime.now.return_value = datetime(2015, 3, 15, 16, 41, 0)
+ job_a.set_enqueued()
+
+ self.assertEquals(job_a.state, ENQUEUED)
+ self.assertEquals(job_a.date_enqueued,
+ datetime(2015, 3, 15, 16, 41, 0))
+ self.assertFalse(job_a.date_started)
+
+ def test_set_started(self):
+ job_a = Job(self.method)
+ datetime_path = 'odoo.addons.queue_job.job.datetime'
+ with mock.patch(datetime_path, autospec=True) as mock_datetime:
+ mock_datetime.now.return_value = datetime(2015, 3, 15, 16, 41, 0)
+ job_a.set_started()
+
+ self.assertEquals(job_a.state, STARTED)
+ self.assertEquals(job_a.date_started,
+ datetime(2015, 3, 15, 16, 41, 0))
+
+ def test_set_done(self):
+ job_a = Job(self.method)
+ datetime_path = 'odoo.addons.queue_job.job.datetime'
+ with mock.patch(datetime_path, autospec=True) as mock_datetime:
+ mock_datetime.now.return_value = datetime(2015, 3, 15, 16, 41, 0)
+ job_a.set_done(result='test')
+
+ self.assertEquals(job_a.state, DONE)
+ self.assertEquals(job_a.result, 'test')
+ self.assertEquals(job_a.date_done,
+ datetime(2015, 3, 15, 16, 41, 0))
+ self.assertFalse(job_a.exc_info)
+
+ def test_set_failed(self):
+ job_a = Job(self.method)
+ job_a.set_failed(exc_info='failed test')
+ self.assertEquals(job_a.state, FAILED)
+ self.assertEquals(job_a.exc_info, 'failed test')
+
+ def test_postpone(self):
+ job_a = Job(self.method)
+ datetime_path = 'odoo.addons.queue_job.job.datetime'
+ with mock.patch(datetime_path, autospec=True) as mock_datetime:
+ mock_datetime.now.return_value = datetime(2015, 3, 15, 16, 41, 0)
+ job_a.postpone(result='test', seconds=60)
+
+ self.assertEquals(job_a.eta, datetime(2015, 3, 15, 16, 42, 0))
+ self.assertEquals(job_a.result, 'test')
+ self.assertFalse(job_a.exc_info)
+
+ def test_store(self):
+ test_job = Job(self.method)
+ test_job.store()
+ stored = self.queue_job.search([('uuid', '=', test_job.uuid)])
+ self.assertEqual(len(stored), 1)
+
+ def test_read(self):
+ eta = datetime.now() + timedelta(hours=5)
+ test_job = Job(self.method,
+ args=('o', 'k'),
+ kwargs={'c': '!'},
+ priority=15,
+ eta=eta,
+ description="My description")
+ test_job.user_id = 1
+ test_job.company_id = self.env.ref("base.main_company").id
+ test_job.store()
+ job_read = Job.load(self.env, test_job.uuid)
+ self.assertEqual(test_job.uuid, job_read.uuid)
+ self.assertEqual(test_job.model_name, job_read.model_name)
+ self.assertEqual(test_job.func, job_read.func)
+ self.assertEqual(test_job.args, job_read.args)
+ self.assertEqual(test_job.kwargs, job_read.kwargs)
+ self.assertEqual(test_job.method_name, job_read.method_name)
+ self.assertEqual(test_job.description, job_read.description)
+ self.assertEqual(test_job.state, job_read.state)
+ self.assertEqual(test_job.priority, job_read.priority)
+ self.assertEqual(test_job.exc_info, job_read.exc_info)
+ self.assertEqual(test_job.result, job_read.result)
+ self.assertEqual(test_job.user_id, job_read.user_id)
+ self.assertEqual(test_job.company_id, job_read.company_id)
+ delta = timedelta(seconds=1) # DB does not keep milliseconds
+ self.assertAlmostEqual(test_job.date_created, job_read.date_created,
+ delta=delta)
+ self.assertAlmostEqual(test_job.date_started, job_read.date_started,
+ delta=delta)
+ self.assertAlmostEqual(test_job.date_enqueued, job_read.date_enqueued,
+ delta=delta)
+ self.assertAlmostEqual(test_job.date_done, job_read.date_done,
+ delta=delta)
+ self.assertAlmostEqual(test_job.eta, job_read.eta,
+ delta=delta)
+
+ test_date = datetime(2015, 3, 15, 21, 7, 0)
+ job_read.date_enqueued = test_date
+ job_read.date_started = test_date
+ job_read.date_done = test_date
+ job_read.store()
+
+ job_read = Job.load(self.env, test_job.uuid)
+ self.assertAlmostEqual(job_read.date_started, test_date,
+ delta=delta)
+ self.assertAlmostEqual(job_read.date_enqueued, test_date,
+ delta=delta)
+ self.assertAlmostEqual(job_read.date_done, test_date,
+ delta=delta)
+
+ def test_job_unlinked(self):
+ test_job = Job(self.method,
+ args=('o', 'k'),
+ kwargs={'c': '!'})
+ test_job.store()
+ stored = self.queue_job.search([('uuid', '=', test_job.uuid)])
+ stored.unlink()
+ with self.assertRaises(NoSuchJobError):
+ Job.load(self.env, test_job.uuid)
+
+ def test_unicode(self):
+ test_job = Job(self.method,
+ args=(u'öô¿‽', u'ñě'),
+ kwargs={'c': u'ßø'},
+ priority=15,
+ description=u"My dé^Wdescription")
+ test_job.user_id = 1
+ test_job.store()
+ job_read = Job.load(self.env, test_job.uuid)
+ self.assertEqual(test_job.args, job_read.args)
+ self.assertEqual(job_read.args, (u'öô¿‽', u'ñě'))
+ self.assertEqual(test_job.kwargs, job_read.kwargs)
+ self.assertEqual(job_read.kwargs, {'c': u'ßø'})
+ self.assertEqual(test_job.description, job_read.description)
+ self.assertEqual(job_read.description, u"My dé^Wdescription")
+
+ def test_accented_bytestring(self):
+ test_job = Job(self.method,
+ args=('öô¿‽', 'ñě'),
+ kwargs={'c': 'ßø'},
+ priority=15,
+ description="My dé^Wdescription")
+ test_job.user_id = 1
+ test_job.store()
+ job_read = Job.load(self.env, test_job.uuid)
+ # the job's args and description have been created as bytestring but
+ # are decoded to utf8 by the ORM so make them comparable
+ self.assertEqual(job_read.args, ('öô¿‽'.decode('utf8'),
+ 'ñě'.decode('utf8')))
+ self.assertEqual(job_read.kwargs, {'c': 'ßø'.decode('utf8')})
+ self.assertEqual(job_read.description,
+ "My dé^Wdescription".decode('utf8'))
+
+ def test_job_delay(self):
+ self.cr.execute('delete from queue_job')
+ job_ = self.env['test.queue.job'].with_delay().testing_method()
+ stored = self.queue_job.search([])
+ self.assertEqual(len(stored), 1)
+ self.assertEqual(
+ stored.uuid,
+ job_.uuid,
+ 'Incorrect returned Job UUID')
+
+ def test_job_delay_model_method(self):
+ self.cr.execute('delete from queue_job')
+ delayable = self.env['test.queue.job'].with_delay()
+ job_instance = delayable.testing_method('a', k=1)
+ self.assertTrue(job_instance)
+ result = job_instance.perform()
+ self.assertEquals(
+ result,
+ (('a',), {'k': 1})
+ )
+
+
+class TestJobs(common.TransactionCase):
+ """ Test jobs on other methods or with different job configuration """
+
+ def test_description(self):
+ """ If no description is given to the job, it
+ should be computed from the function
+ """
+ # if a docstring is defined for the function
+ # it's used as description
+ job_a = Job(self.env['test.queue.job'].testing_method)
+ self.assertEqual(job_a.description, "Method used for tests")
+ # if no docstring, the description is computed
+ job_b = Job(self.env['test.queue.job'].no_description)
+ self.assertEqual(job_b.description, "test.queue.job.no_description")
+ # case when we explicitly specify the description
+ description = "My description"
+ job_a = Job(self.env['test.queue.job'].testing_method,
+ description=description)
+ self.assertEqual(job_a.description, description)
+
+ def test_retry_pattern(self):
+ """ When we specify a retry pattern, the eta must follow it"""
+ datetime_path = 'odoo.addons.queue_job.job.datetime'
+ method = self.env['test.queue.job'].job_with_retry_pattern
+ with mock.patch(datetime_path, autospec=True) as mock_datetime:
+ mock_datetime.now.return_value = datetime(
+ 2015, 6, 1, 15, 10, 0
+ )
+ test_job = Job(method, max_retries=0)
+ test_job.retry += 1
+ test_job.postpone(self.env)
+ self.assertEqual(test_job.retry, 1)
+ self.assertEqual(test_job.eta,
+ datetime(2015, 6, 1, 15, 11, 0))
+ test_job.retry += 1
+ test_job.postpone(self.env)
+ self.assertEqual(test_job.retry, 2)
+ self.assertEqual(test_job.eta,
+ datetime(2015, 6, 1, 15, 13, 0))
+ test_job.retry += 1
+ test_job.postpone(self.env)
+ self.assertEqual(test_job.retry, 3)
+ self.assertEqual(test_job.eta,
+ datetime(2015, 6, 1, 15, 10, 10))
+ test_job.retry += 1
+ test_job.postpone(self.env)
+ self.assertEqual(test_job.retry, 4)
+ self.assertEqual(test_job.eta,
+ datetime(2015, 6, 1, 15, 10, 10))
+ test_job.retry += 1
+ test_job.postpone(self.env)
+ self.assertEqual(test_job.retry, 5)
+ self.assertEqual(test_job.eta,
+ datetime(2015, 6, 1, 15, 15, 0))
+
+ def test_retry_pattern_no_zero(self):
+ """ When we specify a retry pattern without 0, uses RETRY_INTERVAL"""
+ method = self.env['test.queue.job'].job_with_retry_pattern__no_zero
+ test_job = Job(method, max_retries=0)
+ test_job.retry += 1
+ self.assertEqual(test_job.retry, 1)
+ self.assertEqual(test_job._get_retry_seconds(), RETRY_INTERVAL)
+ test_job.retry += 1
+ self.assertEqual(test_job.retry, 2)
+ self.assertEqual(test_job._get_retry_seconds(), RETRY_INTERVAL)
+ test_job.retry += 1
+ self.assertEqual(test_job.retry, 3)
+ self.assertEqual(test_job._get_retry_seconds(), 180)
+ test_job.retry += 1
+ self.assertEqual(test_job.retry, 4)
+ self.assertEqual(test_job._get_retry_seconds(), 180)
+
+ def test_job_delay_model_method_multi(self):
+ rec1 = self.env['test.queue.job'].create({'name': 'test1'})
+ rec2 = self.env['test.queue.job'].create({'name': 'test2'})
+ recs = rec1 + rec2
+ job_instance = recs.with_delay().mapped('name')
+ self.assertTrue(job_instance)
+ self.assertEquals(job_instance.args, ('name',))
+ self.assertEquals(job_instance.recordset, recs)
+ self.assertEquals(job_instance.model_name, 'test.queue.job')
+ self.assertEquals(job_instance.method_name, 'mapped')
+ self.assertEquals(['test1', 'test2'], job_instance.perform())
+
+ def test_job_with_mutable_arguments(self):
+ """ Job with mutable arguments do not mutate on perform() """
+ delayable = self.env['test.queue.job'].with_delay()
+ job_instance = delayable.job_alter_mutable([1], mutable_kwarg={'a': 1})
+ self.assertTrue(job_instance)
+ result = job_instance.perform()
+ self.assertEquals(
+ result,
+ ([1, 2], {'a': 1, 'b': 2})
+ )
+ job_instance.set_done()
+ # at this point, the 'args' and 'kwargs' of the job instance
+ # might have been modified, but they must never be modified in
+ # the queue_job table after their creation, so a new 'load' will
+ # get the initial values.
+ job_instance.store()
+ # jobs are always loaded before being performed, so we simulate
+ # this behavior here to check if we have the correct initial arguments
+ job_instance = Job.load(self.env, job_instance.uuid)
+ self.assertEquals(([1],), job_instance.args)
+ self.assertEquals({'mutable_kwarg': {'a': 1}}, job_instance.kwargs)
+
+
+class TestJobModel(common.TransactionCase):
+
+ def setUp(self):
+ super(TestJobModel, self).setUp()
+ self.queue_job = self.env['queue.job']
+ self.user = self.env['res.users']
+ self.method = self.env['test.queue.job'].testing_method
+
+ def _create_job(self):
+ test_job = Job(self.method)
+ test_job.store()
+ stored = Job.db_record_from_uuid(self.env, test_job.uuid)
+ self.assertEqual(len(stored), 1)
+ return stored
+
+ def test_job_change_state(self):
+ stored = self._create_job()
+ stored._change_job_state(DONE, result='test')
+ self.assertEqual(stored.state, DONE)
+ self.assertEqual(stored.result, 'test')
+ stored._change_job_state(PENDING, result='test2')
+ self.assertEqual(stored.state, PENDING)
+ self.assertEqual(stored.result, 'test2')
+ with self.assertRaises(ValueError):
+ # only PENDING and DONE supported
+ stored._change_job_state(STARTED)
+
+ def test_button_done(self):
+ stored = self._create_job()
+ stored.button_done()
+ self.assertEqual(stored.state, DONE)
+ self.assertEqual(stored.result,
+ 'Manually set to done by %s' % self.env.user.name)
+
+ def test_requeue(self):
+ stored = self._create_job()
+ stored.write({'state': 'failed'})
+ stored.requeue()
+ self.assertEqual(stored.state, PENDING)
+
+ def test_message_when_write_fail(self):
+ stored = self._create_job()
+ stored.write({'state': 'failed'})
+ self.assertEqual(stored.state, FAILED)
+ messages = stored.message_ids
+ self.assertEqual(len(messages), 2)
+
+ def test_follower_when_write_fail(self):
+ """Check that inactive users doesn't are not followers even if
+ they are linked to an active partner"""
+ group = self.env.ref('queue_job.group_queue_job_manager')
+ vals = {'name': 'xx',
+ 'login': 'xx',
+ 'groups_id': [(6, 0, [group.id])],
+ 'active': False,
+ }
+ inactiveusr = self.user.create(vals)
+ inactiveusr.partner_id.active = True
+ self.assertFalse(inactiveusr in group.users)
+ stored = self._create_job()
+ stored.write({'state': 'failed'})
+ followers = stored.message_follower_ids.mapped('partner_id')
+ self.assertFalse(inactiveusr.partner_id in followers)
+ self.assertFalse(
+ set([u.partner_id for u in group.users]) - set(followers))
+
+ def test_autovacuum(self):
+ stored = self._create_job()
+ stored.write({'date_done': '2000-01-01 00:00:00'})
+ self.env['queue.job'].autovacuum()
+ self.assertEqual(len(self.env['queue.job'].search([])), 0)
+
+ def test_wizard_requeue(self):
+ stored = self._create_job()
+ stored.write({'state': 'failed'})
+ model = self.env['queue.requeue.job']
+ model = model.with_context(active_model='queue.job',
+ active_ids=stored.ids)
+ model.create({}).requeue()
+ self.assertEqual(stored.state, PENDING)
+
+ def test_context_uuid(self):
+ delayable = self.env['test.queue.job'].with_delay()
+ test_job = delayable.testing_method(return_context=True)
+ result = test_job.perform()
+ key_present = 'job_uuid' in result
+ self.assertTrue(key_present)
+ self.assertEqual(result['job_uuid'], test_job._uuid)
+
+
+class TestJobStorageMultiCompany(common.TransactionCase):
+ """ Test storage of jobs """
+
+ def setUp(self):
+ super(TestJobStorageMultiCompany, self).setUp()
+ self.queue_job = self.env['queue.job']
+ grp_queue_job_manager = self.ref("queue_job.group_queue_job_manager")
+ User = self.env['res.users']
+ Company = self.env['res.company']
+ Partner = self.env['res.partner']
+ self.other_partner_a = Partner.create(
+ {"name": "My Company a",
+ "is_company": True,
+ "email": "test@tes.ttest",
+ })
+ self.other_company_a = Company.create(
+ {"name": "My Company a",
+ "partner_id": self.other_partner_a.id,
+ "rml_header1": "My Company Tagline",
+ "currency_id": self.ref("base.EUR")
+ })
+ self.other_user_a = User.create(
+ {"partner_id": self.other_partner_a.id,
+ "company_id": self.other_company_a.id,
+ "company_ids": [(4, self.other_company_a.id)],
+ "login": "my_login a",
+ "name": "my user",
+ "groups_id": [(4, grp_queue_job_manager)]
+ })
+ self.other_partner_b = Partner.create(
+ {"name": "My Company b",
+ "is_company": True,
+ "email": "test@tes.ttest",
+ })
+ self.other_company_b = Company.create(
+ {"name": "My Company b",
+ "partner_id": self.other_partner_b.id,
+ "rml_header1": "My Company Tagline",
+ "currency_id": self.ref("base.EUR")
+ })
+ self.other_user_b = User.create(
+ {"partner_id": self.other_partner_b.id,
+ "company_id": self.other_company_b.id,
+ "company_ids": [(4, self.other_company_b.id)],
+ "login": "my_login_b",
+ "name": "my user 1",
+ "groups_id": [(4, grp_queue_job_manager)]
+ })
+
+ def _create_job(self, env):
+ self.cr.execute('delete from queue_job')
+ env['test.queue.job'].with_delay().testing_method()
+ stored = self.queue_job.search([])
+ self.assertEqual(len(stored), 1)
+ return stored
+
+ def test_job_default_company_id(self):
+ """the default company is the one from the current user_id"""
+ stored = self._create_job(self.env)
+ self.assertEqual(stored.company_id.id,
+ self.ref("base.main_company"),
+ 'Incorrect default company_id')
+ env = self.env(user=self.other_user_b.id)
+ stored = self._create_job(env)
+ self.assertEqual(stored.company_id.id,
+ self.other_company_b.id,
+ 'Incorrect default company_id')
+
+ def test_job_no_company_id(self):
+ """ if we put an empty company_id in the context
+ jobs are created without company_id
+ """
+ env = self.env(context={'company_id': None})
+ stored = self._create_job(env)
+ self.assertFalse(stored.company_id,
+ 'Company_id should be empty')
+
+ def test_job_specific_company_id(self):
+ """If a company_id specified in the context
+ it's used by default for the job creation"""
+ env = self.env(context={'company_id': self.other_company_a.id})
+ stored = self._create_job(env)
+ self.assertEqual(stored.company_id.id,
+ self.other_company_a.id,
+ 'Incorrect company_id')
+
+ def test_job_subscription(self):
+ # if the job is created without company_id, all members of
+ # queue_job.group_queue_job_manager must be followers
+ User = self.env['res.users']
+ no_company_context = dict(self.env.context, company_id=None)
+ no_company_env = self.env(context=no_company_context)
+ stored = self._create_job(no_company_env)
+ stored._subscribe_users()
+ users = User.search(
+ [('groups_id', '=', self.ref('queue_job.group_queue_job_manager'))]
+ )
+ self.assertEqual(len(stored.message_follower_ids), len(users))
+ expected_partners = [u.partner_id for u in users]
+ self.assertSetEqual(
+ set(stored.message_follower_ids.mapped('partner_id')),
+ set(expected_partners))
+ followers_id = stored.message_follower_ids.mapped('partner_id.id')
+ self.assertIn(self.other_partner_a.id, followers_id)
+ self.assertIn(self.other_partner_b.id, followers_id)
+ # jobs created for a specific company_id are followed only by
+ # company's members
+ company_a_context = dict(self.env.context,
+ company_id=self.other_company_a.id)
+ company_a_env = self.env(context=company_a_context)
+ stored = self._create_job(company_a_env)
+ stored.sudo(self.other_user_a.id)._subscribe_users()
+ # 2 because admin + self.other_partner_a
+ self.assertEqual(len(stored.message_follower_ids), 2)
+ users = User.browse([SUPERUSER_ID, self.other_user_a.id])
+ expected_partners = [u.partner_id for u in users]
+ self.assertSetEqual(
+ set(stored.message_follower_ids.mapped('partner_id')),
+ set(expected_partners))
+ followers_id = stored.message_follower_ids.mapped('partner_id.id')
+ self.assertIn(self.other_partner_a.id, followers_id)
+ self.assertNotIn(self.other_partner_b.id, followers_id)
diff --git a/test_queue_job/tests/test_job_channels.py b/test_queue_job/tests/test_job_channels.py
new file mode 100644
index 0000000000..6b6ca389e3
--- /dev/null
+++ b/test_queue_job/tests/test_job_channels.py
@@ -0,0 +1,107 @@
+# -*- coding: utf-8 -*-
+# Copyright 2016 Camptocamp SA
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
+
+from odoo import exceptions
+import odoo.tests.common as common
+from odoo.addons.queue_job.job import Job, job
+
+
+class TestJobChannels(common.TransactionCase):
+
+ def setUp(self):
+ super(TestJobChannels, self).setUp()
+ self.function_model = self.env['queue.job.function']
+ self.channel_model = self.env['queue.job.channel']
+ self.test_model = self.env['test.queue.channel']
+ self.root_channel = self.env.ref('queue_job.channel_root')
+
+ def test_channel_complete_name(self):
+ channel = self.channel_model.create({'name': 'number',
+ 'parent_id': self.root_channel.id,
+ })
+ subchannel = self.channel_model.create({'name': 'five',
+ 'parent_id': channel.id,
+ })
+ self.assertEquals(channel.complete_name, 'root.number')
+ self.assertEquals(subchannel.complete_name, 'root.number.five')
+
+ def test_channel_tree(self):
+ with self.assertRaises(exceptions.ValidationError):
+ self.channel_model.create({'name': 'sub'})
+
+ def test_channel_root(self):
+ with self.assertRaises(exceptions.Warning):
+ self.root_channel.unlink()
+ with self.assertRaises(exceptions.Warning):
+ self.root_channel.name = 'leaf'
+
+ def test_register_jobs(self):
+ self.env['queue.job.function'].search([]).unlink()
+ self.env['queue.job.channel'].search([('name', '!=', 'root')]).unlink()
+
+ method_a = self.env['test.queue.channel'].job_a
+ self.env['queue.job.function']._register_job(method_a)
+ method_b = self.env['test.queue.channel'].job_b
+ self.env['queue.job.function']._register_job(method_b)
+
+        path_a = '<test.queue.channel>.job_a'
+        path_b = '<test.queue.channel>.job_b'
+ self.assertTrue(
+ self.function_model.search([('name', '=', path_a)])
+ )
+ self.assertTrue(
+ self.function_model.search([('name', '=', path_b)])
+ )
+
+ def test_channel_on_job(self):
+ self.env['queue.job.function'].search([]).unlink()
+ self.env['queue.job.channel'].search([('name', '!=', 'root')]).unlink()
+
+ method = self.env['test.queue.channel'].job_a
+ self.env['queue.job.function']._register_job(method)
+ path_a = '<%s>.%s' % (method.im_class._name, method.__name__)
+ job_func = self.function_model.search([('name', '=', path_a)])
+ self.assertEquals(job_func.channel, 'root')
+
+ test_job = Job(method)
+ test_job.store()
+ stored = self.env['queue.job'].search([('uuid', '=', test_job.uuid)])
+ self.assertEquals(stored.channel, 'root')
+
+ channel = self.channel_model.create(
+ {'name': 'sub', 'parent_id': self.root_channel.id}
+ )
+ job_func.channel_id = channel
+
+ test_job = Job(method)
+ test_job.store()
+ stored = self.env['queue.job'].search([('uuid', '=', test_job.uuid)])
+ self.assertEquals(stored.channel, 'root.sub')
+
+ def test_default_channel(self):
+ self.env['queue.job.function'].search([]).unlink()
+ self.env['queue.job.channel'].search([('name', '!=', 'root')]).unlink()
+
+ method = self.env['test.queue.channel'].job_sub_channel
+ self.env['queue.job.function']._register_job(method)
+ self.assertEquals(method.default_channel, 'root.sub.subsub')
+
+ path_a = '<%s>.%s' % (method.im_class._name, method.__name__)
+ job_func = self.function_model.search([('name', '=', path_a)])
+
+ channel = job_func.channel_id
+ self.assertEquals(channel.name, 'subsub')
+ self.assertEquals(channel.parent_id.name, 'sub')
+ self.assertEquals(channel.parent_id.parent_id.name, 'root')
+ self.assertEquals(job_func.channel, 'root.sub.subsub')
+
+ def test_job_decorator(self):
+ """ Test the job decorator """
+ default_channel = 'channel'
+ retry_pattern = {1: 5}
+ partial = job(None, default_channel=default_channel,
+ retry_pattern=retry_pattern)
+ self.assertEquals(partial.keywords.get('default_channel'),
+ default_channel)
+ self.assertEquals(partial.keywords.get('retry_pattern'), retry_pattern)
diff --git a/test_queue_job/tests/test_related_actions.py b/test_queue_job/tests/test_related_actions.py
new file mode 100644
index 0000000000..d8d41cf2a6
--- /dev/null
+++ b/test_queue_job/tests/test_related_actions.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014-2016 Camptocamp SA
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
+
+import odoo.tests.common as common
+from odoo.addons.queue_job.job import Job
+
+
+class TestRelatedAction(common.TransactionCase):
+ """ Test Related Actions """
+
+ def setUp(self):
+ super(TestRelatedAction, self).setUp()
+ self.model = self.env['test.related.action']
+ self.method = self.env['test.queue.job'].testing_method
+
+ def test_return(self):
+ """ Job with related action check if action returns correctly """
+ job = Job(self.method)
+ act_job, act_kwargs = job.related_action()
+ self.assertEqual(act_job, job.db_record())
+ self.assertEqual(act_kwargs, {})
+
+ def test_no_related_action(self):
+ """ Job without related action """
+ job = Job(self.model.testing_related_action__no)
+ self.assertIsNone(job.related_action())
+
+ def test_return_none(self):
+ """ Job with related action returning None """
+ # default action returns None
+ job = Job(self.model.testing_related_action__return_none)
+ self.assertIsNone(job.related_action())
+
+ def test_kwargs(self):
+ """ Job with related action check if action propagates kwargs """
+ job_ = Job(self.model.testing_related_action__kwargs)
+ self.assertEqual(job_.related_action(), (job_.db_record(), {'b': 4}))
+
+ def test_store_related_action(self):
+ """ Call the related action on the model """
+ job = Job(self.model.testing_related_action__store,
+ args=('Discworld',))
+ job.store()
+ stored_job = self.env['queue.job'].search(
+ [('uuid', '=', job.uuid)]
+ )
+ self.assertEqual(len(stored_job), 1)
+ expected = {'type': 'ir.actions.act_url',
+ 'target': 'new',
+ 'url': 'https://en.wikipedia.org/wiki/Discworld',
+ }
+ self.assertEquals(stored_job.open_related_action(), expected)