From 950bd68520dd6d0ff1181f722753d7bd92d59548 Mon Sep 17 00:00:00 2001
From: Thierry Ducrest
Date: Wed, 30 Aug 2017 13:16:00 +0200
Subject: [PATCH 01/47] Create base_attachment_object_storage to extract common
 code shared by object store implementations
---
base_attachment_object_storage/README.rst | 7 +
base_attachment_object_storage/__init__.py | 1 +
.../__manifest__.py | 17 ++
.../models/__init__.py | 1 +
.../models/ir_attachment.py | 226 ++++++++++++++++++
5 files changed, 252 insertions(+)
create mode 100644 base_attachment_object_storage/README.rst
create mode 100644 base_attachment_object_storage/__init__.py
create mode 100644 base_attachment_object_storage/__manifest__.py
create mode 100644 base_attachment_object_storage/models/__init__.py
create mode 100644 base_attachment_object_storage/models/ir_attachment.py
diff --git a/base_attachment_object_storage/README.rst b/base_attachment_object_storage/README.rst
new file mode 100644
index 0000000000..c802fafca5
--- /dev/null
+++ b/base_attachment_object_storage/README.rst
@@ -0,0 +1,7 @@
+Base class for attachments on external object store
+===================================================
+
+This is a base addon that regroups common code used by addons targeting a specific object store.
+
+
+
diff --git a/base_attachment_object_storage/__init__.py b/base_attachment_object_storage/__init__.py
new file mode 100644
index 0000000000..0650744f6b
--- /dev/null
+++ b/base_attachment_object_storage/__init__.py
@@ -0,0 +1 @@
+from . import models
diff --git a/base_attachment_object_storage/__manifest__.py b/base_attachment_object_storage/__manifest__.py
new file mode 100644
index 0000000000..b9e6610844
--- /dev/null
+++ b/base_attachment_object_storage/__manifest__.py
@@ -0,0 +1,17 @@
+# -*- coding: utf-8 -*-
+# Copyright 2017 Camptocamp SA
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
+
+
+{'name': 'Base Attachment Object Store',
+ 'summary': 'Base module for the implementation of external object store.',
+ 'version': '10.0.1.1.0',
+ 'author': 'Camptocamp,Odoo Community Association (OCA)',
+ 'license': 'AGPL-3',
+ 'category': 'Knowledge Management',
+ 'depends': ['base'],
+ 'website': 'http://www.camptocamp.com',
+ 'data': [],
+ 'installable': True,
+ 'auto_install': True,
+ }
diff --git a/base_attachment_object_storage/models/__init__.py b/base_attachment_object_storage/models/__init__.py
new file mode 100644
index 0000000000..aaf38a167c
--- /dev/null
+++ b/base_attachment_object_storage/models/__init__.py
@@ -0,0 +1 @@
+from . import ir_attachment
diff --git a/base_attachment_object_storage/models/ir_attachment.py b/base_attachment_object_storage/models/ir_attachment.py
new file mode 100644
index 0000000000..535986a843
--- /dev/null
+++ b/base_attachment_object_storage/models/ir_attachment.py
@@ -0,0 +1,226 @@
+# -*- coding: utf-8 -*-
+# Copyright 2017 Camptocamp SA
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
+
+
+import logging
+import os
+import psycopg2
+import odoo
+
+from contextlib import closing, contextmanager
+from odoo import api, exceptions, models, _
+
+
+_logger = logging.getLogger(__name__)
+
+
+class IrAttachment(models.Model):
+ _inherit = 'ir.attachment'
+
+ _local_fields = ('image_small', 'image_medium', 'web_icon_data')
+
+ @api.multi
+ def _save_in_db_anyway(self):
+ """ Return whether an attachment must be stored in db
+
+ When we are using an Object Store. This is sometimes required
+ because the object storage is slower than the database/filesystem.
+
+ We store image_small and image_medium from 'Binary' fields
+ because they should be fast to read as they are often displayed
+ in kanbans / lists. The same for web_icon_data.
+
+ We store the assets locally as well. Not only for performance,
+ but also because it improves the portability of the database:
+ when assets are invalidated, they are deleted so we don't have
+ an old database with attachments pointing to deleted assets.
+
+ """
+ self.ensure_one()
+ # assets
+ if self.res_model == 'ir.ui.view':
+ # assets are stored in 'ir.ui.view'
+ return True
+
+ # Binary fields
+ if self.res_field:
+ # Binary fields are stored with the name of the field in
+ # 'res_field'
+ # 'image' fields can be rather large and should usually
+ # not be requested in bulk in lists
+ if self.res_field and self.res_field in self._local_fields:
+ return True
+ return False
+
+ def _inverse_datas(self):
+ # override in order to store files that need fast access,
+ # we keep them in the database instead of the object storage
+ location = self._storage()
+ for attach in self:
+ if location in self._get_stores() and attach._save_in_db_anyway():
+ # compute the fields that depend on datas
+ value = attach.datas
+ bin_data = value and value.decode('base64') or ''
+ vals = {
+ 'file_size': len(bin_data),
+ 'checksum': self._compute_checksum(bin_data),
+ 'db_datas': value,
+ # we seriously don't need index content on those fields
+ 'index_content': False,
+ 'store_fname': False,
+ }
+ fname = attach.store_fname
+ # write as superuser, as user probably does not
+ # have write access
+ super(IrAttachment, attach.sudo()).write(vals)
+ if fname:
+ self._file_delete(fname)
+ continue
+ super(IrAttachment, attach)._inverse_datas()
+
+ @api.model
+ def _file_read(self, fname, bin_size=False):
+ if self._is_file_from_a_store(fname):
+ return self._store_file_read(fname, bin_size=bin_size)
+ else:
+ _super = super(IrAttachment, self)
+ return _super._file_read(fname, bin_size=bin_size)
+
+ @api.model
+ def _file_write(self, value, checksum):
+ if self._storage() in self._get_stores():
+ filename = self._store_file_write(value, checksum)
+ else:
+ filename = super(IrAttachment, self)._file_write(value, checksum)
+ return filename
+
+ @api.model
+ def _file_delete(self, fname):
+ if self._is_file_from_a_store(fname):
+ cr = self.env.cr
+ cr.execute("SELECT COUNT(*) FROM ir_attachment "
+ "WHERE store_fname = %s", (fname,))
+ count = cr.fetchone()[0]
+ if not count:
+ self._store_file_delete(fname)
+ else:
+ super(IrAttachment, self)._file_delete(fname)
+
+ @api.model
+ def _is_file_from_a_store(self, fname):
+ for store_name in self._get_stores():
+ uri = '{}://'.format(store_name)
+ if fname.startswith(uri):
+ return True
+ return False
+
+ @contextmanager
+ def do_in_new_env(self, new_cr=False):
+ """ Context manager that yields a new environment
+
+ Using a new Odoo Environment and thus a new PG transaction.
+ """
+ with api.Environment.manage():
+ if new_cr:
+ registry = odoo.modules.registry.RegistryManager.get(
+ self.env.cr.dbname
+ )
+ with closing(registry.cursor()) as cr:
+ try:
+ yield self.env(cr=cr)
+ except:
+ cr.rollback()
+ raise
+ else:
+ # disable pylint error because this is a valid commit,
+ # we are in a new env
+ cr.commit() # pylint: disable=invalid-commit
+ else:
+ # make a copy
+ yield self.env()
+
+ @api.multi
+ def _move_attachment_to_store(self):
+ self.ensure_one()
+ _logger.info('inspecting attachment %s (%d)', self.name, self.id)
+ fname = self.store_fname
+ if fname:
+ # migrating from filesystem filestore
+ # or from the old 'store_fname' without the bucket name
+ _logger.info('moving %s on the object storage', fname)
+ self.write({'datas': self.datas,
+ # this is required otherwise the
+ # mimetype gets overridden with
+ # 'application/octet-stream'
+ # on assets
+ 'mimetype': self.mimetype})
+ _logger.info('moved %s on the object storage', fname)
+ full_path = self._full_path(fname)
+ _logger.info('cleaning fs self')
+ if os.path.exists(full_path):
+ try:
+ os.unlink(full_path)
+ except OSError:
+ _logger.info(
+ "_file_delete could not unlink %s",
+ full_path, exc_info=True
+ )
+ except IOError:
+ # Harmless and needed for race conditions
+ _logger.info(
+ "_file_delete could not unlink %s",
+ full_path, exc_info=True
+ )
+ elif self.db_datas:
+ _logger.info('moving on the object storage from database')
+ self.write({'datas': self.datas})
+
+ @api.model
+ def force_storage(self):
+ if not self.env['res.users'].browse(self.env.uid)._is_admin():
+ raise exceptions.AccessError(
+ _('Only administrators can execute this action.'))
+ storage = self._storage()
+ if storage not in self._get_stores():
+ return super(IrAttachment, self).force_storage()
+ _logger.info('migrating files to the object storage')
+ domain = ['!', ('store_fname', '=like', '{}://%'.format(storage)),
+ '|',
+ ('res_field', '=', False),
+ ('res_field', '!=', False)]
+ # We do a copy of the environment so we can workaround the
+ # cache issue below. We do not create a new cursor because
+ # it causes serialization issues due to concurrent updates on
+ # attachments during the installation
+ with self.do_in_new_env() as new_env:
+ model_env = new_env['ir.attachment']
+ ids = model_env.search(domain).ids
+ for attachment_id in ids:
+ try:
+ with new_env.cr.savepoint():
+ # check that no other transaction has
+ # locked the row, don't send a file to S3
+ # in that case
+ self.env.cr.execute("SELECT id "
+ "FROM ir_attachment "
+ "WHERE id = %s "
+ "FOR UPDATE NOWAIT",
+ (attachment_id,),
+ log_exceptions=False)
+
+ # This is a trick to avoid having the 'datas'
+ # function fields computed for every attachment on
+ # each iteration of the loop. The former issue
+ # being that it reads the content of the file of
+ # ALL the attachments on each loop.
+ new_env.clear()
+ attachment = model_env.browse(attachment_id)
+ attachment._move_attachment_to_store()
+ except psycopg2.OperationalError:
+ _logger.error('Could not migrate attachment %s to S3',
+ attachment_id)
+
+ def _get_stores(self):
+ """ To get the list of stores activated in the system """
+ return []
From 2031da2856fe408570c5193cf83f15578bcd5f83 Mon Sep 17 00:00:00 2001
From: Guewen Baconnier
Date: Wed, 20 Sep 2017 09:13:45 +0200
Subject: [PATCH 02/47] Abstract object storage in attachment_s3
Using the base_attachment_object_storage module, the same way
attachment_swift does. A few issues in attachment_swift were fixed
along the way.
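As a rough outline (not part of this patch), a concrete store addon built on
these hooks could look like the sketch below; the 'demo' scheme and the method
bodies are placeholders, only the hook names come from the stubs added here::

    from odoo import models

    class IrAttachment(models.Model):
        _inherit = 'ir.attachment'

        def _get_stores(self):
            # advertise the scheme so _is_file_from_a_store() matches 'demo://...'
            return ['demo']

        def _store_file_read(self, fname, bin_size=False):
            key = fname.partition('://')[2]
            ...  # fetch the object for `key` and return it as _file_read expects

        def _store_file_write(self, key, bin_data):
            ...  # upload bin_data under `key`
            return 'demo://{}'.format(key)

        def _store_file_delete(self, fname):
            key = fname.partition('://')[2]
            ...  # delete the object for `key`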
---
.../models/ir_attachment.py | 38 ++++++++++++++++---
1 file changed, 32 insertions(+), 6 deletions(-)
diff --git a/base_attachment_object_storage/models/ir_attachment.py b/base_attachment_object_storage/models/ir_attachment.py
index 535986a843..9063cc6f61 100644
--- a/base_attachment_object_storage/models/ir_attachment.py
+++ b/base_attachment_object_storage/models/ir_attachment.py
@@ -87,10 +87,29 @@ def _file_read(self, fname, bin_size=False):
_super = super(IrAttachment, self)
return _super._file_read(fname, bin_size=bin_size)
+ def _store_file_read(self, fname, bin_size=False):
+ storage = fname.partition('://')[0]
+ raise NotImplementedError(
+ 'No implementation for %s' % (storage,)
+ )
+
+ def _store_file_write(self, key, bin_data):
+ raise NotImplementedError(
+ 'No implementation for %s' % (self.storage(),)
+ )
+
+ def _store_file_delete(self, fname):
+ storage = fname.partition('://')[0]
+ raise NotImplementedError(
+ 'No implementation for %s' % (storage,)
+ )
+
@api.model
def _file_write(self, value, checksum):
if self._storage() in self._get_stores():
- filename = self._store_file_write(value, checksum)
+ bin_data = value.decode('base64')
+ key = self._compute_checksum(bin_data)
+ filename = self._store_file_write(key, bin_data)
else:
filename = super(IrAttachment, self)._file_write(value, checksum)
return filename
@@ -99,6 +118,8 @@ def _file_write(self, value, checksum):
def _file_delete(self, fname):
if self._is_file_from_a_store(fname):
cr = self.env.cr
+ # using SQL to include files hidden through unlink or due to record
+ # rules
cr.execute("SELECT COUNT(*) FROM ir_attachment "
"WHERE store_fname = %s", (fname,))
count = cr.fetchone()[0]
@@ -184,16 +205,21 @@ def force_storage(self):
storage = self._storage()
if storage not in self._get_stores():
return super(IrAttachment, self).force_storage()
+ self._force_storage_to_object_storage()
+
+ @api.model
+ def _force_storage_to_object_storage(self, new_cr=False):
_logger.info('migrating files to the object storage')
+ storage = self._storage()
domain = ['!', ('store_fname', '=like', '{}://%'.format(storage)),
'|',
('res_field', '=', False),
('res_field', '!=', False)]
- # We do a copy of the environment so we can workaround the
- # cache issue below. We do not create a new cursor because
- # it causes serialization issues due to concurrent updates on
- # attachments during the installation
- with self.do_in_new_env() as new_env:
+ # We do a copy of the environment so we can workaround the cache issue
+ # below. We do not create a new cursor by default because it causes
+ # serialization issues due to concurrent updates on attachments during
+ # the installation
+ with self.do_in_new_env(new_cr=new_cr) as new_env:
model_env = new_env['ir.attachment']
ids = model_env.search(domain).ids
for attachment_id in ids:
From 914eed7e3bbc15ef631f3e2ff9bb08af2da1f208 Mon Sep 17 00:00:00 2001
From: Guewen Baconnier
Date: Wed, 15 Nov 2017 14:31:23 +0100
Subject: [PATCH 03/47] Set addons uninstallable
---
base_attachment_object_storage/__manifest__.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/base_attachment_object_storage/__manifest__.py b/base_attachment_object_storage/__manifest__.py
index b9e6610844..cf507b9d2a 100644
--- a/base_attachment_object_storage/__manifest__.py
+++ b/base_attachment_object_storage/__manifest__.py
@@ -12,6 +12,6 @@
'depends': ['base'],
'website': 'http://www.camptocamp.com',
'data': [],
- 'installable': True,
+ 'installable': False,
'auto_install': True,
}
From 36fddd0aa27c4ef1c46d1469235a1e47c01ef70c Mon Sep 17 00:00:00 2001
From: Guewen Baconnier
Date: Wed, 15 Nov 2017 14:56:45 +0100
Subject: [PATCH 04/47] Set addons installable
---
base_attachment_object_storage/__manifest__.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/base_attachment_object_storage/__manifest__.py b/base_attachment_object_storage/__manifest__.py
index cf507b9d2a..97e55b33ba 100644
--- a/base_attachment_object_storage/__manifest__.py
+++ b/base_attachment_object_storage/__manifest__.py
@@ -5,13 +5,13 @@
{'name': 'Base Attachment Object Store',
'summary': 'Base module for the implementation of external object store.',
- 'version': '10.0.1.1.0',
+ 'version': '11.0.1.0.0',
'author': 'Camptocamp,Odoo Community Association (OCA)',
'license': 'AGPL-3',
'category': 'Knowledge Management',
'depends': ['base'],
'website': 'http://www.camptocamp.com',
'data': [],
- 'installable': False,
+ 'installable': True,
'auto_install': True,
}
From 3ebda002ff5eb3727e06850f78cbca89d37d2551 Mon Sep 17 00:00:00 2001
From: Guewen Baconnier
Date: Wed, 15 Nov 2017 15:54:59 +0100
Subject: [PATCH 05/47] Replace value.decode('base64') by base64.b64decode
(py3)
---
base_attachment_object_storage/models/ir_attachment.py | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/base_attachment_object_storage/models/ir_attachment.py b/base_attachment_object_storage/models/ir_attachment.py
index 9063cc6f61..64f800a739 100644
--- a/base_attachment_object_storage/models/ir_attachment.py
+++ b/base_attachment_object_storage/models/ir_attachment.py
@@ -3,6 +3,7 @@
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
+import base64
import logging
import os
import psycopg2
@@ -61,7 +62,7 @@ def _inverse_datas(self):
if location in self._get_stores() and attach._save_in_db_anyway():
# compute the fields that depend on datas
value = attach.datas
- bin_data = value and value.decode('base64') or ''
+ bin_data = base64.b64decode(value) if value else ''
vals = {
'file_size': len(bin_data),
'checksum': self._compute_checksum(bin_data),
@@ -107,7 +108,7 @@ def _store_file_delete(self, fname):
@api.model
def _file_write(self, value, checksum):
if self._storage() in self._get_stores():
- bin_data = value.decode('base64')
+ bin_data = base64.b64decode(value)
key = self._compute_checksum(bin_data)
filename = self._store_file_write(key, bin_data)
else:
From 9fd0c3fd6a1413784635f44292d8d1906a111985 Mon Sep 17 00:00:00 2001
From: Guewen Baconnier
Date: Wed, 13 Jun 2018 15:59:16 +0200
Subject: [PATCH 06/47] Ensure that migration of files is committed before
deleting files
When moving attachments from the filestore to an object storage, the
filesystem files will be deleted only after the commit, so if the
transaction is rolled back, we still have the local files for another
try.
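The pattern used below, in short (a sketch of the diff, with hypothetical
surrounding code elided)::

    files_to_clean = []
    # ... the migration loop appends the local path of each moved file ...

    def clean():
        clean_fs(files_to_clean)

    # cr.after('commit', ...) runs the callback only once the transaction
    # is committed, so a rollback keeps the filestore files for another try
    if files_to_clean:
        new_env.cr.after('commit', clean)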
---
.../models/ir_attachment.py | 51 ++++++++++++-------
1 file changed, 33 insertions(+), 18 deletions(-)
diff --git a/base_attachment_object_storage/models/ir_attachment.py b/base_attachment_object_storage/models/ir_attachment.py
index 64f800a739..eb39694528 100644
--- a/base_attachment_object_storage/models/ir_attachment.py
+++ b/base_attachment_object_storage/models/ir_attachment.py
@@ -16,6 +16,25 @@
_logger = logging.getLogger(__name__)
+def clean_fs(files):
+ _logger.info('cleaning old files from filestore')
+ for full_path in files:
+ if os.path.exists(full_path):
+ try:
+ os.unlink(full_path)
+ except OSError:
+ _logger.info(
+ "_file_delete could not unlink %s",
+ full_path, exc_info=True
+ )
+ except IOError:
+ # Harmless and needed for race conditions
+ _logger.info(
+ "_file_delete could not unlink %s",
+ full_path, exc_info=True
+ )
+
+
class IrAttachment(models.Model):
_inherit = 'ir.attachment'
@@ -178,22 +197,7 @@ def _move_attachment_to_store(self):
# on assets
'mimetype': self.mimetype})
_logger.info('moved %s on the object storage', fname)
- full_path = self._full_path(fname)
- _logger.info('cleaning fs self')
- if os.path.exists(full_path):
- try:
- os.unlink(full_path)
- except OSError:
- _logger.info(
- "_file_delete could not unlink %s",
- full_path, exc_info=True
- )
- except IOError:
- # Harmless and needed for race conditions
- _logger.info(
- "_file_delete could not unlink %s",
- full_path, exc_info=True
- )
+ return self._full_path(fname)
elif self.db_datas:
_logger.info('moving on the object storage from database')
self.write({'datas': self.datas})
@@ -223,11 +227,12 @@ def _force_storage_to_object_storage(self, new_cr=False):
with self.do_in_new_env(new_cr=new_cr) as new_env:
model_env = new_env['ir.attachment']
ids = model_env.search(domain).ids
+ files_to_clean = []
for attachment_id in ids:
try:
with new_env.cr.savepoint():
# check that no other transaction has
- # locked the row, don't send a file to S3
+ # locked the row, don't send a file to storage
# in that case
self.env.cr.execute("SELECT id "
"FROM ir_attachment "
@@ -243,11 +248,21 @@ def _force_storage_to_object_storage(self, new_cr=False):
# ALL the attachments on each loop.
new_env.clear()
attachment = model_env.browse(attachment_id)
- attachment._move_attachment_to_store()
+ path = attachment._move_attachment_to_store()
+ if path:
+ files_to_clean.append(path)
except psycopg2.OperationalError:
_logger.error('Could not migrate attachment %s to S3',
attachment_id)
+ def clean():
+ clean_fs(files_to_clean)
+
+ # delete the files from the filesystem once we know the changes
+ # have been committed in ir.attachment
+ if files_to_clean:
+ new_env.cr.after('commit', clean)
+
def _get_stores(self):
""" To get the list of stores activated in the system """
return []
From 3a7ea8b720edb778807373374514f7f1faa1c98b Mon Sep 17 00:00:00 2001
From: Guewen Baconnier
Date: Wed, 13 Jun 2018 16:00:57 +0200
Subject: [PATCH 07/47] Fix attachments stored in FS instead of object storage
Assume the following situation:
* We have installed addons base, sale and attachment_s3 (hence
base_attachment_object_storage as dependency)
* All attachments are in S3 already
* We run an upgrade of the 'base' addon, 'sale' is upgraded before
attachment_s3 in the order of loading.
* Sale updates the icon of the Sale menu
* As attachment_s3 is not loaded yet, the attachment is created in the
filestore
Now if we don't persist the filestore or use different servers, we'll
lose the images of the menus (or any attachment loaded by the
install/upgrade of an addon).
The implemented solution is to move the attachments from the filestore
to the object storage at the loading of the module. However, this
operation can take time and it shouldn't be run by 2 processes at the
same time, so we want to detect if the module is loaded during a normal odoo
startup or when some addons have been upgraded. There is nothing anymore
at this point which allows us to know that modules have just been
upgraded except... in the caller frame (load_modules). We have to rely
on the inspect module and get the caller frame, which is not recommended,
but it seems the only way; besides, it's not called often, and if
_register_hook was called from another place, it would have no effect
(unless the other place has a variable 'update_module' too).
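In short, the frame inspection boils down to the following sketch (assuming
_register_hook is indeed reached from load_modules)::

    import inspect

    def _register_hook(self):
        super()._register_hook()
        # one frame up, the expected caller is odoo.modules.loading.load_modules
        caller_frame = inspect.getouterframes(inspect.currentframe(), 2)[1][0]
        # 'update_module' is a local variable of load_modules, truthy when
        # addons were installed or upgraded during this startup
        if caller_frame.f_locals.get('update_module'):
            self.env['ir.attachment'].sudo()._force_storage_to_object_storage()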
---
.../models/ir_attachment.py | 29 ++++++++++++++++++-
1 file changed, 28 insertions(+), 1 deletion(-)
diff --git a/base_attachment_object_storage/models/ir_attachment.py b/base_attachment_object_storage/models/ir_attachment.py
index eb39694528..153282d4ea 100644
--- a/base_attachment_object_storage/models/ir_attachment.py
+++ b/base_attachment_object_storage/models/ir_attachment.py
@@ -2,8 +2,8 @@
# Copyright 2017 Camptocamp SA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
-
import base64
+import inspect
import logging
import os
import psycopg2
@@ -40,6 +40,33 @@ class IrAttachment(models.Model):
_local_fields = ('image_small', 'image_medium', 'web_icon_data')
+ @api.cr
+ def _register_hook(self):
+ super(IrAttachment, self)._register_hook()
+ # ignore if we are not using an object storage
+ if self._storage() not in self._get_stores():
+ return
+ curframe = inspect.currentframe()
+ calframe = inspect.getouterframes(curframe, 2)
+ # the caller of _register_hook is 'load_modules' in
+ # odoo/modules/loading.py
+ load_modules_frame = calframe[1][0]
+ # 'update_module' is an argument that 'load_modules' receives with a
+ # True-ish value meaning that an install or upgrade of addon has been
+ # done during the initialization. We need to move the attachments that
+ # could have been created or updated in other addons before this addon
+ # was loaded
+ update_module = load_modules_frame.f_locals.get('update_module')
+
+ # We need to call the migration on the loading of the model because
+ # when we are upgrading addons, some of them might add attachments.
+ # To be sure they are migrated to the storage we need to call the
+ # migration here.
+ # Typical example is images of ir.ui.menu which are updated in
+ # ir.attachment at every upgrade of the addons
+ if update_module:
+ self.env['ir.attachment'].sudo()._force_storage_to_object_storage()
+
@api.multi
def _save_in_db_anyway(self):
""" Return whether an attachment must be stored in db
From addc78d3a1529532809c129a0312a7d2af6b95db Mon Sep 17 00:00:00 2001
From: Guewen Baconnier
Date: Wed, 13 Jun 2018 16:01:22 +0200
Subject: [PATCH 08/47] Document a weird domain which is there for a reason
The reason being:
https://github.com/odoo/odoo/blob/9032617120138848c63b3cfa5d1913c5e5ad76db/odoo/addons/base/ir/ir_attachment.py#L344-L347
I nearly deleted this domain but it was too weird to be there for no
reason. A comment explaining the issue was really missing.
---
base_attachment_object_storage/models/ir_attachment.py | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/base_attachment_object_storage/models/ir_attachment.py b/base_attachment_object_storage/models/ir_attachment.py
index 153282d4ea..dbf6979d90 100644
--- a/base_attachment_object_storage/models/ir_attachment.py
+++ b/base_attachment_object_storage/models/ir_attachment.py
@@ -243,6 +243,11 @@ def force_storage(self):
def _force_storage_to_object_storage(self, new_cr=False):
_logger.info('migrating files to the object storage')
storage = self._storage()
+ # The weird "res_field = False OR res_field != False" domain
+ # is required! It's because of an override of _search in ir.attachment
+ # which adds ('res_field', '=', False) when the domain does not
+ # contain 'res_field'.
+ # https://github.com/odoo/odoo/blob/9032617120138848c63b3cfa5d1913c5e5ad76db/odoo/addons/base/ir/ir_attachment.py#L344-L347
domain = ['!', ('store_fname', '=like', '{}://%'.format(storage)),
'|',
('res_field', '=', False),
From b93f3d1cb992eeadf814781403eaa9f84eb01f05 Mon Sep 17 00:00:00 2001
From: Guewen Baconnier
Date: Wed, 13 Jun 2018 17:25:21 +0200
Subject: [PATCH 09/47] base_attachment_object_storage: bump 1.1.0
---
base_attachment_object_storage/__manifest__.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/base_attachment_object_storage/__manifest__.py b/base_attachment_object_storage/__manifest__.py
index 97e55b33ba..e3091339e7 100644
--- a/base_attachment_object_storage/__manifest__.py
+++ b/base_attachment_object_storage/__manifest__.py
@@ -5,7 +5,7 @@
{'name': 'Base Attachment Object Store',
'summary': 'Base module for the implementation of external object store.',
- 'version': '11.0.1.0.0',
+ 'version': '11.0.1.1.0',
'author': 'Camptocamp,Odoo Community Association (OCA)',
'license': 'AGPL-3',
'category': 'Knowledge Management',
From 72feffecacf890f2525f45eccaab3489382c8e4a Mon Sep 17 00:00:00 2001
From: jcoux
Date: Wed, 24 Oct 2018 11:53:27 +0200
Subject: [PATCH 10/47] Set all modules to uninstallable
---
base_attachment_object_storage/__manifest__.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/base_attachment_object_storage/__manifest__.py b/base_attachment_object_storage/__manifest__.py
index e3091339e7..c3d63d871b 100644
--- a/base_attachment_object_storage/__manifest__.py
+++ b/base_attachment_object_storage/__manifest__.py
@@ -12,6 +12,6 @@
'depends': ['base'],
'website': 'http://www.camptocamp.com',
'data': [],
- 'installable': True,
+ 'installable': False,
'auto_install': True,
}
From 70a122906c9c8939a3ce0157a69017c5a0b3e167 Mon Sep 17 00:00:00 2001
From: jcoux
Date: Wed, 24 Oct 2018 11:58:38 +0200
Subject: [PATCH 11/47] Migration to 12.0
---
base_attachment_object_storage/__manifest__.py | 7 +++----
base_attachment_object_storage/models/ir_attachment.py | 3 +--
2 files changed, 4 insertions(+), 6 deletions(-)
diff --git a/base_attachment_object_storage/__manifest__.py b/base_attachment_object_storage/__manifest__.py
index c3d63d871b..df802e6c99 100644
--- a/base_attachment_object_storage/__manifest__.py
+++ b/base_attachment_object_storage/__manifest__.py
@@ -1,17 +1,16 @@
-# -*- coding: utf-8 -*-
-# Copyright 2017 Camptocamp SA
+# Copyright 2018 Camptocamp SA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
{'name': 'Base Attachment Object Store',
'summary': 'Base module for the implementation of external object store.',
- 'version': '11.0.1.1.0',
+ 'version': '12.0.1.1.0',
'author': 'Camptocamp,Odoo Community Association (OCA)',
'license': 'AGPL-3',
'category': 'Knowledge Management',
'depends': ['base'],
'website': 'http://www.camptocamp.com',
'data': [],
- 'installable': False,
+ 'installable': True,
'auto_install': True,
}
diff --git a/base_attachment_object_storage/models/ir_attachment.py b/base_attachment_object_storage/models/ir_attachment.py
index dbf6979d90..150d60f9ce 100644
--- a/base_attachment_object_storage/models/ir_attachment.py
+++ b/base_attachment_object_storage/models/ir_attachment.py
@@ -1,5 +1,4 @@
-# -*- coding: utf-8 -*-
-# Copyright 2017 Camptocamp SA
+# Copyright 2018 Camptocamp SA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
import base64
From d5a5f114b8fe66053060fd1c033192228affeede Mon Sep 17 00:00:00 2001
From: jcoux
Date: Fri, 23 Nov 2018 09:31:46 +0100
Subject: [PATCH 12/47] fixup! Migration to 12.0
---
base_attachment_object_storage/__manifest__.py | 2 +-
base_attachment_object_storage/models/ir_attachment.py | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/base_attachment_object_storage/__manifest__.py b/base_attachment_object_storage/__manifest__.py
index df802e6c99..58d85fc198 100644
--- a/base_attachment_object_storage/__manifest__.py
+++ b/base_attachment_object_storage/__manifest__.py
@@ -1,4 +1,4 @@
-# Copyright 2018 Camptocamp SA
+# Copyright 2017-2018 Camptocamp SA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
diff --git a/base_attachment_object_storage/models/ir_attachment.py b/base_attachment_object_storage/models/ir_attachment.py
index 150d60f9ce..deda46cf26 100644
--- a/base_attachment_object_storage/models/ir_attachment.py
+++ b/base_attachment_object_storage/models/ir_attachment.py
@@ -1,4 +1,4 @@
-# Copyright 2018 Camptocamp SA
+# Copyright 2017-2018 Camptocamp SA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
import base64
From 0e8ca9aa8fb9ef6a60a1bd51f2bb35c745802b53 Mon Sep 17 00:00:00 2001
From: Akim Juillerat
Date: Tue, 26 Feb 2019 23:43:09 +0100
Subject: [PATCH 13/47] [IMP]: Allow passing the storage as a context key
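Usage would look roughly like this ('s3' stands for any store name returned by
_get_stores(), base64_payload for the base64-encoded content)::

    # force the object store used for this write through the context
    attachments = env['ir.attachment'].with_context(storage_location='s3')
    attachments.create({'name': 'report.pdf', 'datas': base64_payload})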
---
.../models/ir_attachment.py | 14 ++++++++------
1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/base_attachment_object_storage/models/ir_attachment.py b/base_attachment_object_storage/models/ir_attachment.py
index deda46cf26..51303bdf62 100644
--- a/base_attachment_object_storage/models/ir_attachment.py
+++ b/base_attachment_object_storage/models/ir_attachment.py
@@ -42,8 +42,9 @@ class IrAttachment(models.Model):
@api.cr
def _register_hook(self):
super(IrAttachment, self)._register_hook()
+ location = self.env.context.get('storage_location') or self._storage()
# ignore if we are not using an object storage
- if self._storage() not in self._get_stores():
+ if location not in self._get_stores():
return
curframe = inspect.currentframe()
calframe = inspect.getouterframes(curframe, 2)
@@ -102,7 +103,7 @@ def _save_in_db_anyway(self):
def _inverse_datas(self):
# override in order to store files that need fast access,
# we keep them in the database instead of the object storage
- location = self._storage()
+ location = self.env.context.get('storage_location') or self._storage()
for attach in self:
if location in self._get_stores() and attach._save_in_db_anyway():
# compute the fields that depend on datas
@@ -152,7 +153,8 @@ def _store_file_delete(self, fname):
@api.model
def _file_write(self, value, checksum):
- if self._storage() in self._get_stores():
+ location = self.env.context.get('storage_location') or self._storage()
+ if location in self._get_stores():
bin_data = base64.b64decode(value)
key = self._compute_checksum(bin_data)
filename = self._store_file_write(key, bin_data)
@@ -233,15 +235,15 @@ def force_storage(self):
if not self.env['res.users'].browse(self.env.uid)._is_admin():
raise exceptions.AccessError(
_('Only administrators can execute this action.'))
- storage = self._storage()
- if storage not in self._get_stores():
+ location = self.env.context.get('storage_location') or self._storage()
+ if location not in self._get_stores():
return super(IrAttachment, self).force_storage()
self._force_storage_to_object_storage()
@api.model
def _force_storage_to_object_storage(self, new_cr=False):
_logger.info('migrating files to the object storage')
- storage = self._storage()
+ storage = self.env.context.get('storage_location') or self._storage()
# The weird "res_field = False OR res_field != False" domain
# is required! It's because of an override of _search in ir.attachment
# which adds ('res_field', '=', False) when the domain does not
From 2de76dd46c9f65286b0581720c59a4c6dc6d5b48 Mon Sep 17 00:00:00 2001
From: Akim Juillerat
Date: Fri, 1 Mar 2019 18:56:54 +0100
Subject: [PATCH 14/47] [IMP]: Allow using a context key as the storage key
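Building on the previous patch, a rough usage sketch (store name and key are
arbitrary examples)::

    # store the file under a caller-chosen object key instead of its checksum
    env['ir.attachment'].with_context(
        storage_location='s3',
        force_storage_key='exports/2019/report.pdf',
    ).create({'name': 'report.pdf', 'datas': base64_payload})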
---
base_attachment_object_storage/models/ir_attachment.py | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/base_attachment_object_storage/models/ir_attachment.py b/base_attachment_object_storage/models/ir_attachment.py
index 51303bdf62..85296c8eee 100644
--- a/base_attachment_object_storage/models/ir_attachment.py
+++ b/base_attachment_object_storage/models/ir_attachment.py
@@ -156,7 +156,9 @@ def _file_write(self, value, checksum):
location = self.env.context.get('storage_location') or self._storage()
if location in self._get_stores():
bin_data = base64.b64decode(value)
- key = self._compute_checksum(bin_data)
+ key = self.env.context.get('force_storage_key')
+ if not key:
+ key = self._compute_checksum(bin_data)
filename = self._store_file_write(key, bin_data)
else:
filename = super(IrAttachment, self)._file_write(value, checksum)
From 37c3b6980f8779c14ffc39010edb268bafbee3ec Mon Sep 17 00:00:00 2001
From: Tonow-c2c
Date: Mon, 7 Oct 2019 12:16:06 +0200
Subject: [PATCH 15/47] BSRD-286: Set the addons to uninstallable
---
base_attachment_object_storage/__manifest__.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/base_attachment_object_storage/__manifest__.py b/base_attachment_object_storage/__manifest__.py
index 58d85fc198..b6a3fcbf5c 100644
--- a/base_attachment_object_storage/__manifest__.py
+++ b/base_attachment_object_storage/__manifest__.py
@@ -11,6 +11,6 @@
'depends': ['base'],
'website': 'http://www.camptocamp.com',
'data': [],
- 'installable': True,
+ 'installable': False,
'auto_install': True,
}
From 4123fbe55ed46379593235c67851fffdc6494574 Mon Sep 17 00:00:00 2001
From: Akim Juillerat
Date: Mon, 7 Oct 2019 13:33:53 +0200
Subject: [PATCH 16/47] [MIG] base_attachment_object_storage: Migration to 13.0
---
base_attachment_object_storage/__manifest__.py | 6 +++---
.../models/ir_attachment.py | 16 ++++++----------
2 files changed, 9 insertions(+), 13 deletions(-)
diff --git a/base_attachment_object_storage/__manifest__.py b/base_attachment_object_storage/__manifest__.py
index b6a3fcbf5c..ed66feaa83 100644
--- a/base_attachment_object_storage/__manifest__.py
+++ b/base_attachment_object_storage/__manifest__.py
@@ -1,16 +1,16 @@
-# Copyright 2017-2018 Camptocamp SA
+# Copyright 2017-2019 Camptocamp SA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
{'name': 'Base Attachment Object Store',
'summary': 'Base module for the implementation of external object store.',
- 'version': '12.0.1.1.0',
+ 'version': '13.0.1.1.0',
'author': 'Camptocamp,Odoo Community Association (OCA)',
'license': 'AGPL-3',
'category': 'Knowledge Management',
'depends': ['base'],
'website': 'http://www.camptocamp.com',
'data': [],
- 'installable': False,
+ 'installable': True,
'auto_install': True,
}
diff --git a/base_attachment_object_storage/models/ir_attachment.py b/base_attachment_object_storage/models/ir_attachment.py
index 85296c8eee..0caa39a8bc 100644
--- a/base_attachment_object_storage/models/ir_attachment.py
+++ b/base_attachment_object_storage/models/ir_attachment.py
@@ -1,4 +1,4 @@
-# Copyright 2017-2018 Camptocamp SA
+# Copyright 2017-2019 Camptocamp SA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
import base64
@@ -39,9 +39,8 @@ class IrAttachment(models.Model):
_local_fields = ('image_small', 'image_medium', 'web_icon_data')
- @api.cr
def _register_hook(self):
- super(IrAttachment, self)._register_hook()
+ super()._register_hook()
location = self.env.context.get('storage_location') or self._storage()
# ignore if we are not using an object storage
if location not in self._get_stores():
@@ -67,7 +66,6 @@ def _register_hook(self):
if update_module:
self.env['ir.attachment'].sudo()._force_storage_to_object_storage()
- @api.multi
def _save_in_db_anyway(self):
""" Return whether an attachment must be stored in db
@@ -131,8 +129,7 @@ def _file_read(self, fname, bin_size=False):
if self._is_file_from_a_store(fname):
return self._store_file_read(fname, bin_size=bin_size)
else:
- _super = super(IrAttachment, self)
- return _super._file_read(fname, bin_size=bin_size)
+ return super()._file_read(fname, bin_size=bin_size)
def _store_file_read(self, fname, bin_size=False):
storage = fname.partition('://')[0]
@@ -161,7 +158,7 @@ def _file_write(self, value, checksum):
key = self._compute_checksum(bin_data)
filename = self._store_file_write(key, bin_data)
else:
- filename = super(IrAttachment, self)._file_write(value, checksum)
+ filename = super()._file_write(value, checksum)
return filename
@api.model
@@ -176,7 +173,7 @@ def _file_delete(self, fname):
if not count:
self._store_file_delete(fname)
else:
- super(IrAttachment, self)._file_delete(fname)
+ super()._file_delete(fname)
@api.model
def _is_file_from_a_store(self, fname):
@@ -211,7 +208,6 @@ def do_in_new_env(self, new_cr=False):
# make a copy
yield self.env()
- @api.multi
def _move_attachment_to_store(self):
self.ensure_one()
_logger.info('inspecting attachment %s (%d)', self.name, self.id)
@@ -239,7 +235,7 @@ def force_storage(self):
_('Only administrators can execute this action.'))
location = self.env.context.get('storage_location') or self._storage()
if location not in self._get_stores():
- return super(IrAttachment, self).force_storage()
+ return super().force_storage()
self._force_storage_to_object_storage()
@api.model
From bb86bd86353c83a19a8a87445a458aa2085aeabd Mon Sep 17 00:00:00 2001
From: vrenaville
Date: Tue, 3 Dec 2019 14:44:15 +0100
Subject: [PATCH 17/47] [IMP] route files to db based on size and mimetype
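Roughly, the thresholds introduced here can be tuned through the system
parameters, e.g. from an odoo shell (the values below are only examples)::

    # images up to ~100 KB go to the database, attachments of the listed
    # models never do, regardless of size or mimetype
    params = env['ir.config_parameter'].sudo()
    params.set_param('mimetypes.list.storedb', 'image')
    params.set_param('file.maxsize.storedb', '100000')
    params.set_param('excluded.models.storedb', 'mail.message,mail.mail')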
---
.../__manifest__.py | 2 +-
.../data/res_config_settings_data.xml | 16 ++++++++++
.../models/ir_attachment.py | 30 ++++++++++++-------
3 files changed, 37 insertions(+), 11 deletions(-)
create mode 100644 base_attachment_object_storage/data/res_config_settings_data.xml
diff --git a/base_attachment_object_storage/__manifest__.py b/base_attachment_object_storage/__manifest__.py
index ed66feaa83..76a1a13a4c 100644
--- a/base_attachment_object_storage/__manifest__.py
+++ b/base_attachment_object_storage/__manifest__.py
@@ -10,7 +10,7 @@
'category': 'Knowledge Management',
'depends': ['base'],
'website': 'http://www.camptocamp.com',
- 'data': [],
+ 'data': ['data/res_config_settings_data.xml'],
'installable': True,
'auto_install': True,
}
diff --git a/base_attachment_object_storage/data/res_config_settings_data.xml b/base_attachment_object_storage/data/res_config_settings_data.xml
new file mode 100644
index 0000000000..199d1f57ae
--- /dev/null
+++ b/base_attachment_object_storage/data/res_config_settings_data.xml
@@ -0,0 +1,16 @@
+<odoo>
+    <data noupdate="1">
+        <record id="mimetypes_list_storedb" model="ir.config_parameter">
+            <field name="key">mimetypes.list.storedb</field>
+            <field name="value">image</field>
+        </record>
+        <record id="file_maxsize_storedb" model="ir.config_parameter">
+            <field name="key">file.maxsize.storedb</field>
+            <field name="value">50000</field>
+        </record>
+        <record id="excluded_models_storedb" model="ir.config_parameter">
+            <field name="key">excluded.models.storedb</field>
+            <field name="value">mail.message,mail.mail</field>
+        </record>
+    </data>
+</odoo>
diff --git a/base_attachment_object_storage/models/ir_attachment.py b/base_attachment_object_storage/models/ir_attachment.py
index 0caa39a8bc..596fb1ee74 100644
--- a/base_attachment_object_storage/models/ir_attachment.py
+++ b/base_attachment_object_storage/models/ir_attachment.py
@@ -10,6 +10,7 @@
from contextlib import closing, contextmanager
from odoo import api, exceptions, models, _
+from odoo.tools.mimetypes import guess_mimetype
_logger = logging.getLogger(__name__)
@@ -37,8 +38,6 @@ def clean_fs(files):
class IrAttachment(models.Model):
_inherit = 'ir.attachment'
- _local_fields = ('image_small', 'image_medium', 'web_icon_data')
-
def _register_hook(self):
super()._register_hook()
location = self.env.context.get('storage_location') or self._storage()
@@ -87,14 +86,25 @@ def _save_in_db_anyway(self):
if self.res_model == 'ir.ui.view':
# assets are stored in 'ir.ui.view'
return True
-
- # Binary fields
- if self.res_field:
- # Binary fields are stored with the name of the field in
- # 'res_field'
- # 'image' fields can be rather large and should usually
- # not be requested in bulk in lists
- if self.res_field and self.res_field in self._local_fields:
+ # Check if model must never be stored on DB
+ excluded_model_settings = self.env['ir.config_parameter'].sudo().\
+ get_param('excluded.models.storedb', default='')
+ excluded_model_for_db_store = excluded_model_settings.split(',')
+ if self.res_model in excluded_model_for_db_store:
+ return False
+ # Check if file size and mimetype fit requirements
+ data_to_store = self.datas
+ bin_data = base64.b64decode(data_to_store) if data_to_store else ''
+ current_mimetype = guess_mimetype(bin_data)
+ mimetypes_settings = self.env['ir.config_parameter'].sudo().get_param(
+ 'mimetypes.list.storedb', default='')
+ mimetypes_for_db_store = mimetypes_settings.split(',')
+ if any(current_mimetype.startswith(val) for val in
+ mimetypes_for_db_store):
+ # get allowed size
+ filesize = self.env['ir.config_parameter'].sudo().get_param(
+ 'file.maxsize.storedb', default='0')
+ if len(bin_data) < int(filesize):
return True
return False
From 0318aa2c730ba1ac274ef9aecf1f33ec20662c50 Mon Sep 17 00:00:00 2001
From: Guewen Baconnier
Date: Wed, 1 May 2019 16:02:41 +0200
Subject: [PATCH 18/47] Add method to force storage of special attachments to
DB
Some attachments (e.g. image_small, image_medium) are stored in DB
instead of the object storage for faster access.
In some situations, we may have pushed all these files to the Object
Storage (migration from a filesystem to object storage) and want to
bring back these attachments from the object storage to the database.
This method is not called anywhere but can be called by RPC or scripts.
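It could be invoked roughly like this, e.g. from an odoo shell (a sketch, not
part of the patch)::

    # move assets and small images back from the object storage into the DB;
    # new_cr=True runs the migration in its own cursor/transaction
    env['ir.attachment'].force_storage_to_db_for_special_fields(new_cr=True)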
---
.../models/ir_attachment.py | 99 ++++++++++++++++++-
1 file changed, 98 insertions(+), 1 deletion(-)
diff --git a/base_attachment_object_storage/models/ir_attachment.py b/base_attachment_object_storage/models/ir_attachment.py
index 596fb1ee74..1b0122d35e 100644
--- a/base_attachment_object_storage/models/ir_attachment.py
+++ b/base_attachment_object_storage/models/ir_attachment.py
@@ -5,12 +5,15 @@
import inspect
import logging
import os
+import time
+
import psycopg2
import odoo
from contextlib import closing, contextmanager
from odoo import api, exceptions, models, _
from odoo.tools.mimetypes import guess_mimetype
+from odoo.osv.expression import AND, normalize_domain
_logger = logging.getLogger(__name__)
@@ -65,6 +68,36 @@ def _register_hook(self):
if update_module:
self.env['ir.attachment'].sudo()._force_storage_to_object_storage()
+ @api.model
+ def _save_in_db_domain(self):
+ """Return a domain for attachments that must be forced to DB
+
+ Read the docstring of ``_save_in_db_anyway`` for more details.
+
+ The domain must be inline with the conditions in
+ ``_save_in_db_anyway``.
+ """
+ excluded_model_settings = self.env['ir.config_parameter'].sudo().\
+ get_param('excluded.models.storedb', default='')
+ excluded_model_for_db_store = excluded_model_settings.split(',')
+ mimetypes_settings = self.env['ir.config_parameter'].sudo().get_param(
+ 'mimetypes.list.storedb', default='')
+ mimetypes_for_db_store = mimetypes_settings.split(',')
+ filesize = self.env['ir.config_parameter'].sudo().get_param(
+ 'file.maxsize.storedb', default='0')
+ domain = [
+ '|',
+ # assets are stored in 'ir.ui.view'
+ ('res_model', '=', 'ir.ui.view'),
+ '&', '&',
+ ('file_size', '<', int(filesize)),
+ ('res_model', 'not in', excluded_model_for_db_store),
+ ]
+ domain += ['|'] * (len(mimetypes_for_db_store) - 1)
+ domain += [('mimetype', '=like', mimetype) for mimetype in
+ mimetypes_for_db_store]
+ return domain
+
def _save_in_db_anyway(self):
""" Return whether an attachment must be stored in db
@@ -80,8 +113,13 @@ def _save_in_db_anyway(self):
when assets are invalidated, they are deleted so we don't have
an old database with attachments pointing to deleted assets.
+ The conditions must be inline with the domain in
+ ``_save_in_db_domain``.
+
"""
self.ensure_one()
+ # Note: we cannot use _save_in_db_domain because we can be working
+ # with new records here. The conditions must stay inline though.
# assets
if self.res_model == 'ir.ui.view':
# assets are stored in 'ir.ui.view'
@@ -201,7 +239,7 @@ def do_in_new_env(self, new_cr=False):
"""
with api.Environment.manage():
if new_cr:
- registry = odoo.modules.registry.RegistryManager.get(
+ registry = odoo.modules.registry.Registry.new(
self.env.cr.dbname
)
with closing(registry.cursor()) as cr:
@@ -248,6 +286,65 @@ def force_storage(self):
return super().force_storage()
self._force_storage_to_object_storage()
+ @api.model
+ def force_storage_to_db_for_special_fields(self, new_cr=False):
+ """Migrate special attachments from Object Storage back to database
+
+ Access to a file stored on the object storage is slower
+ than a local disk or database access. For attachments like
+ image_small that are accessed in batch for kanban views, this
+ is too slow. We store this type of attachment in the database.
+
+ This method can be used when migrating a filestore where all the files,
+ including the special files (assets, image_small, ...) have been pushed
+ to the Object Storage and we want to write them back in the database.
+
+ It is not called anywhere, but can be called by RPC or scripts.
+ """
+ storage = self._storage()
+ if storage not in self._get_stores():
+ return
+
+ domain = AND((
+ normalize_domain(
+ [('store_fname', '=like', '{}://%'.format(storage))]
+ ),
+ normalize_domain(self._save_in_db_domain())
+ ))
+
+ with self.do_in_new_env(new_cr=new_cr) as new_env:
+ model_env = new_env['ir.attachment'].with_context(
+ prefetch_fields=False
+ )
+ attachment_ids = model_env.search(domain).ids
+ if not attachment_ids:
+ return
+ total = len(attachment_ids)
+ start_time = time.time()
+ _logger.info('Moving %d attachments from %s to'
+ ' DB for fast access', total, storage)
+ current = 0
+ for attachment_id in attachment_ids:
+ current += 1
+ # if we browse attachments outside of the loop, the first
+ # access to 'datas' will compute all the 'datas' fields at
+ # once, which means reading hundreds or thousands of files at
+ # once, exhausting memory
+ attachment = model_env.browse(attachment_id)
+ # this write will read the datas from the Object Storage and
+ # write them back in the DB (the logic for location to write is
+ # in the 'datas' inverse computed field)
+ attachment.write({'datas': attachment.datas})
+ # as the file will potentially be dropped from the bucket,
+ # we should commit the changes here
+ new_env.cr.commit()
+ if current % 100 == 0 or total - current == 0:
+ _logger.info(
+ 'attachment %s/%s after %.2fs',
+ current, total,
+ time.time() - start_time
+ )
+
@api.model
def _force_storage_to_object_storage(self, new_cr=False):
_logger.info('migrating files to the object storage')
From c17e810b6220ff8f5c0ade31ba0a3d7a0f2a571e Mon Sep 17 00:00:00 2001
From: Guewen Baconnier
Date: Wed, 27 May 2020 15:39:58 +0200
Subject: [PATCH 19/47] Rework and fix storage forced in database
The initial issue that triggered this rework is that the forced storage in
database was working only on writes, and was never applied on attachment
creations.
This feature is used to store small files that need to be read quickly from the
database rather than from the object storage. Reading a file from the object
storage can take 150-200ms, which is fine for downloading a PDF file or a single
image, but not if you need 40 thumbnails.
While working on the fix, I found that:
* the logic to force storage was called in `_inverse_datas`, which is not called
during a create
* odoo implemented a new method `_get_datas_related_values`, which is a model
method that receives only the data and the mimetype, returns the attachment
values and writes the file to the correct place
`_get_datas_related_values` is where we want to plug this special storage,
as it is called for both create and write, and already handles the values and
the conditional write. But using this method, we have less information than
before about the attachment, so let's review the different criteria we had before:
* res_model: we were using it to always store attachments related to
'ir.ui.view' in db, because assets are related to this model. However, we
don't really need to check this: we should store any javascript and css
documents in database.
* exclude res_model: we could have an exclusion list, to say that, for instance,
for mail.message, we should never store any image in db. We don't have this
information anymore, but I think it was never used and was added "just in case":
the default configuration is "mail.mail" and "mail.message", and I couldn't
find any attachment with such a res_model in any of our biggest databases.
So this is removed.
* mimetype and data (size) are the last criteria and we still have them
The new system is only based on mimetype and data size and I think it's actually
more versatile. Previously, we could set a global size and include mimetypes,
but we couldn't say "I want to store all images below 50KB and all files of type
X below 10KB". Now, we have a single system parameter with a dict configuration
(`ir_attachment.storage.force.database`) defaulting to:
{"image/": 51200, "application/javascript": 0, "text/css": 0}
Assets have a limit of zero, which means they will all be stored in the database
whatever their size is.
Overall, this is a great simplification of the module too, as the method
`_get_datas_related_values` integrates it better in the base calls of IrAttachment.
Note for upgrade:
I doubt we customized the previous system parameters which are now obsolete, but
if so, the configuration may need to be moved to `ir_attachment.storage.force.database`.
For the record, the params were:
* mimetypes.list.storedb (default: image)
* file.maxsize.storedb (default: 51200)
* excluded.models.storedb (mail.message,mail.mail), no equivalent now
The method IrAttachment.force_storage_to_db_for_special_fields() should be called
through a migration script on existing databases to move the attachments back into
the database.
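For reference, the parameter could be overridden roughly like this, e.g. from an
odoo shell (example values only)::

    # keep images up to 100 KB in the database, and all JS/CSS assets
    env['ir.config_parameter'].sudo().set_param(
        'ir_attachment.storage.force.database',
        '{"image/": 102400, "application/javascript": 0, "text/css": 0}',
    )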
---
base_attachment_object_storage/README.rst | 33 ++++
.../data/res_config_settings_data.xml | 15 +-
.../models/ir_attachment.py | 181 ++++++++++--------
3 files changed, 133 insertions(+), 96 deletions(-)
diff --git a/base_attachment_object_storage/README.rst b/base_attachment_object_storage/README.rst
index c802fafca5..176459f554 100644
--- a/base_attachment_object_storage/README.rst
+++ b/base_attachment_object_storage/README.rst
@@ -3,5 +3,38 @@ Base class for attachments on external object store
This is a base addon that regroups common code used by addons targeting a specific object store.
+Configuration
+-------------
+Object storage may be slow, and for this reason, we want to store
+some files in the database regardless.
+Small images (128, 256) are used in Odoo in list / kanban views. We
+want them to be fast to read.
+They are generally < 50KB (default configuration) so they don't take
+that much space in database, but they'll be read much faster than from
+the object storage.
+
+The assets (application/javascript, text/css) are stored in database
+as well whatever their size is:
+
+* a database doesn't have thousands of them
+* of course better for performance
+* better portability of a database: when replicating a production
+ instance for dev, the assets are included
+
+This storage configuration can be modified in the system parameter
+``ir_attachment.storage.force.database``, as a JSON value, for instance::
+
+ {"image/": 51200, "application/javascript": 0, "text/css": 0}
+
+Where the key is the beginning of the mimetype to configure and the
+value is the limit in size below which attachments are kept in DB.
+0 means no limit.
+
+Default configuration means:
+
+* image mimetypes (image/png, image/jpeg, ...) below 50KB are
+ stored in database
+* application/javascript are stored in database whatever their size
+* text/css are stored in database whatever their size
diff --git a/base_attachment_object_storage/data/res_config_settings_data.xml b/base_attachment_object_storage/data/res_config_settings_data.xml
index 199d1f57ae..76c6961d93 100644
--- a/base_attachment_object_storage/data/res_config_settings_data.xml
+++ b/base_attachment_object_storage/data/res_config_settings_data.xml
@@ -1,16 +1,9 @@
-        <record id="mimetypes_list_storedb" model="ir.config_parameter">
-            <field name="key">mimetypes.list.storedb</field>
-            <field name="value">image</field>
-        </record>
-        <record id="file_maxsize_storedb" model="ir.config_parameter">
-            <field name="key">file.maxsize.storedb</field>
-            <field name="value">50000</field>
-        </record>
-        <record id="excluded_models_storedb" model="ir.config_parameter">
-            <field name="key">excluded.models.storedb</field>
-            <field name="value">mail.message,mail.mail</field>
+        <record id="ir_attachment_storage_force_database"
+                model="ir.config_parameter">
+            <field name="key">ir_attachment.storage.force.database</field>
+            <field name="value">{"image/": 51200, "application/javascript": 0, "text/css": 0}</field>
diff --git a/base_attachment_object_storage/models/ir_attachment.py b/base_attachment_object_storage/models/ir_attachment.py
index 1b0122d35e..260c6cae88 100644
--- a/base_attachment_object_storage/models/ir_attachment.py
+++ b/base_attachment_object_storage/models/ir_attachment.py
@@ -12,8 +12,8 @@
from contextlib import closing, contextmanager
from odoo import api, exceptions, models, _
-from odoo.tools.mimetypes import guess_mimetype
-from odoo.osv.expression import AND, normalize_domain
+from odoo.osv.expression import AND, OR, normalize_domain
+from odoo.tools.safe_eval import const_eval
_logger = logging.getLogger(__name__)
@@ -68,109 +68,114 @@ def _register_hook(self):
if update_module:
self.env['ir.attachment'].sudo()._force_storage_to_object_storage()
- @api.model
- def _save_in_db_domain(self):
+ @property
+ def _object_storage_default_force_db_config(self):
+ return {"image/": 51200, "application/javascript": 0, "text/css": 0}
+
+ def _get_storage_force_db_config(self):
+ param = self.env['ir.config_parameter'].sudo().get_param(
+ 'ir_attachment.storage.force.database',
+ )
+ storage_config = None
+ if param:
+ try:
+ storage_config = const_eval(param)
+ except (SyntaxError, TypeError, ValueError):
+ _logger.exception(
+ "Could not parse system parameter"
+ " 'ir_attachment.storage.force.database', reverting to the"
+ " default configuration.")
+
+ if not storage_config:
+ storage_config = self._object_storage_default_force_db_config
+ return storage_config
+
+ def _store_in_db_instead_of_object_storage_domain(self):
"""Return a domain for attachments that must be forced to DB
- Read the docstring of ``_save_in_db_anyway`` for more details.
+ Read the docstring of ``_store_in_db_instead_of_object_storage`` for
+ more details.
+
+ Used in ``force_storage_to_db_for_special_fields`` to find records
+ to move from the object storage to the database.
The domain must be inline with the conditions in
- ``_save_in_db_anyway``.
+ ``_store_in_db_instead_of_object_storage``.
"""
- excluded_model_settings = self.env['ir.config_parameter'].sudo().\
- get_param('excluded.models.storedb', default='')
- excluded_model_for_db_store = excluded_model_settings.split(',')
- mimetypes_settings = self.env['ir.config_parameter'].sudo().get_param(
- 'mimetypes.list.storedb', default='')
- mimetypes_for_db_store = mimetypes_settings.split(',')
- filesize = self.env['ir.config_parameter'].sudo().get_param(
- 'file.maxsize.storedb', default='0')
- domain = [
- '|',
- # assets are stored in 'ir.ui.view'
- ('res_model', '=', 'ir.ui.view'),
- '&', '&',
- ('file_size', '<', int(filesize)),
- ('res_model', 'not in', excluded_model_for_db_store),
- ]
- domain += ['|'] * (len(mimetypes_for_db_store) - 1)
- domain += [('mimetype', '=like', mimetype) for mimetype in
- mimetypes_for_db_store]
+ domain = []
+ storage_config = self._get_storage_force_db_config()
+ for mimetype_key, limit in storage_config.items():
+ part = [("mimetype", "=like", "{}%".format(mimetype_key))]
+ if limit:
+ part = AND([part, [("file_size", "<=", limit)]])
+ domain = OR([domain, part])
return domain
- def _save_in_db_anyway(self):
+ def _store_in_db_instead_of_object_storage(self, data, mimetype):
""" Return whether an attachment must be stored in db
- When we are using an Object Store. This is sometimes required
+ When we are using an Object Storage. This is sometimes required
because the object storage is slower than the database/filesystem.
- We store image_small and image_medium from 'Binary' fields
- because they should be fast to read as they are often displayed
- in kanbans / lists. The same for web_icon_data.
+ Small images (128, 256) are used in Odoo in list / kanban views. We
+ want them to be fast to read.
+ They are generally < 50KB (default configuration) so they don't take
+ that much space in database, but they'll be read much faster than from
+ the object storage.
+
+ The assets (application/javascript, text/css) are stored in database
+ as well whatever their size is:
+
+ * a database doesn't have thousands of them
+ * of course better for performance
+ * better portability of a database: when replicating a production
+ instance for dev, the assets are included
+
+ The configuration can be modified in the ir.config_parameter
+ ``ir_attachment.storage.force.database``, as a dictionary, for
+ instance::
- We store the assets locally as well. Not only for performance,
- but also because it improves the portability of the database:
- when assets are invalidated, they are deleted so we don't have
- an old database with attachments pointing to deleted assets.
+ {"image/": 51200, "application/javascript": 0, "text/css": 0}
+
+ Where the key is the beginning of the mimetype to configure and the
+ value is the limit in size below which attachments are kept in DB.
+ 0 means no limit.
+
+ Default configuration means:
+
+ * image mimetypes (image/png, image/jpeg, ...) below 51200 bytes are
+ stored in database
+ * application/javascript are stored in database whatever their size
+ * text/css are stored in database whatever their size
The conditions must be inline with the domain in
- ``_save_in_db_domain``.
+ ``_store_in_db_instead_of_object_storage_domain``.
"""
- self.ensure_one()
- # Note: we cannot use _save_in_db_domain because we can be working
- # with new records here. The conditions must stay inline though.
- # assets
- if self.res_model == 'ir.ui.view':
- # assets are stored in 'ir.ui.view'
- return True
- # Check if model must never be stored on DB
- excluded_model_settings = self.env['ir.config_parameter'].sudo().\
- get_param('excluded.models.storedb', default='')
- excluded_model_for_db_store = excluded_model_settings.split(',')
- if self.res_model in excluded_model_for_db_store:
- return False
- # Check if file size and mimetype fit requirements
- data_to_store = self.datas
- bin_data = base64.b64decode(data_to_store) if data_to_store else ''
- current_mimetype = guess_mimetype(bin_data)
- mimetypes_settings = self.env['ir.config_parameter'].sudo().get_param(
- 'mimetypes.list.storedb', default='')
- mimetypes_for_db_store = mimetypes_settings.split(',')
- if any(current_mimetype.startswith(val) for val in
- mimetypes_for_db_store):
- # get allowed size
- filesize = self.env['ir.config_parameter'].sudo().get_param(
- 'file.maxsize.storedb', default='0')
- if len(bin_data) < int(filesize):
- return True
+ storage_config = self._get_storage_force_db_config()
+ for mimetype_key, limit in storage_config.items():
+ if mimetype.startswith(mimetype_key):
+ if not limit:
+ return True
+ bin_data = base64.b64decode(data) if data else b''
+ return len(bin_data) <= limit
return False
- def _inverse_datas(self):
- # override in order to store files that need fast access,
- # we keep them in the database instead of the object storage
- location = self.env.context.get('storage_location') or self._storage()
- for attach in self:
- if location in self._get_stores() and attach._save_in_db_anyway():
+ def _get_datas_related_values(self, data, mimetype):
+ storage = self.env.context.get('storage_location') or self._storage()
+ if data and storage in self._get_stores():
+ if self._store_in_db_instead_of_object_storage(data, mimetype):
# compute the fields that depend on datas
- value = attach.datas
- bin_data = base64.b64decode(value) if value else ''
- vals = {
+ bin_data = base64.b64decode(data) if data else b''
+ values = {
'file_size': len(bin_data),
'checksum': self._compute_checksum(bin_data),
- 'db_datas': value,
- # we seriously don't need index content on those fields
- 'index_content': False,
+ 'index_content': self._index(bin_data, mimetype),
'store_fname': False,
+ 'db_datas': data,
}
- fname = attach.store_fname
- # write as superuser, as user probably does not
- # have write access
- super(IrAttachment, attach.sudo()).write(vals)
- if fname:
- self._file_delete(fname)
- continue
- super(IrAttachment, attach)._inverse_datas()
+ return values
+ return super()._get_datas_related_values(data, mimetype)
@api.model
def _file_read(self, fname, bin_size=False):
@@ -245,7 +250,7 @@ def do_in_new_env(self, new_cr=False):
with closing(registry.cursor()) as cr:
try:
yield self.env(cr=cr)
- except:
+ except Exception:
cr.rollback()
raise
else:
@@ -307,9 +312,15 @@ def force_storage_to_db_for_special_fields(self, new_cr=False):
domain = AND((
normalize_domain(
- [('store_fname', '=like', '{}://%'.format(storage))]
+ [('store_fname', '=like', '{}://%'.format(storage)),
+ # for res_field, see comment in
+ # _force_storage_to_object_storage
+ '|',
+ ('res_field', '=', False),
+ ('res_field', '!=', False),
+ ]
),
- normalize_domain(self._save_in_db_domain())
+ normalize_domain(self._store_in_db_instead_of_object_storage_domain())
))
with self.do_in_new_env(new_cr=new_cr) as new_env:
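
For reference, a minimal sketch of how the ``ir_attachment.storage.force.database`` parameter documented above could be set and checked from an ``odoo shell`` session (``env`` is assumed to be the shell environment; the values shown are the module defaults)::

    # Sketch only: assumes an `odoo shell` session providing `env`.
    # Force images below 50 KB and all JS/CSS assets to stay in the database.
    env["ir.config_parameter"].sudo().set_param(
        "ir_attachment.storage.force.database",
        '{"image/": 51200, "application/javascript": 0, "text/css": 0}',
    )
    # The attachment model parses the parameter back into a dict, reverting
    # to the default configuration if the value cannot be parsed.
    print(env["ir.attachment"]._get_storage_force_db_config())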
From ab9d8356bfa7f7364c7b64643f0c39ea0532e1a3 Mon Sep 17 00:00:00 2001
From: Denis Leemann
Date: Tue, 6 Oct 2020 10:54:24 +0200
Subject: [PATCH 20/47] Set module for 14.0 uninstallable
---
base_attachment_object_storage/__manifest__.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/base_attachment_object_storage/__manifest__.py b/base_attachment_object_storage/__manifest__.py
index 76a1a13a4c..d2b57771af 100644
--- a/base_attachment_object_storage/__manifest__.py
+++ b/base_attachment_object_storage/__manifest__.py
@@ -11,6 +11,6 @@
'depends': ['base'],
'website': 'http://www.camptocamp.com',
'data': ['data/res_config_settings_data.xml'],
- 'installable': True,
+ 'installable': False,
'auto_install': True,
}
From 4b2b37d14a00e71d2234294a9c488a4be20f8676 Mon Sep 17 00:00:00 2001
From: Patrick Tombez
Date: Tue, 3 Nov 2020 11:36:50 +0100
Subject: [PATCH 21/47] [MIG] base_attachment_object_storage: Migration to 14.0
---
base_attachment_object_storage/__manifest__.py | 4 ++--
.../models/ir_attachment.py | 13 ++++++-------
2 files changed, 8 insertions(+), 9 deletions(-)
diff --git a/base_attachment_object_storage/__manifest__.py b/base_attachment_object_storage/__manifest__.py
index d2b57771af..e410519498 100644
--- a/base_attachment_object_storage/__manifest__.py
+++ b/base_attachment_object_storage/__manifest__.py
@@ -4,13 +4,13 @@
{'name': 'Base Attachment Object Store',
'summary': 'Base module for the implementation of external object store.',
- 'version': '13.0.1.1.0',
+ 'version': "14.0.1.0.0",
'author': 'Camptocamp,Odoo Community Association (OCA)',
'license': 'AGPL-3',
'category': 'Knowledge Management',
'depends': ['base'],
'website': 'http://www.camptocamp.com',
'data': ['data/res_config_settings_data.xml'],
- 'installable': False,
+ 'installable': True,
'auto_install': True,
}
diff --git a/base_attachment_object_storage/models/ir_attachment.py b/base_attachment_object_storage/models/ir_attachment.py
index 260c6cae88..cd26db6527 100644
--- a/base_attachment_object_storage/models/ir_attachment.py
+++ b/base_attachment_object_storage/models/ir_attachment.py
@@ -178,13 +178,13 @@ def _get_datas_related_values(self, data, mimetype):
return super()._get_datas_related_values(data, mimetype)
@api.model
- def _file_read(self, fname, bin_size=False):
+ def _file_read(self, fname):
if self._is_file_from_a_store(fname):
- return self._store_file_read(fname, bin_size=bin_size)
+ return self._store_file_read(fname)
else:
- return super()._file_read(fname, bin_size=bin_size)
+ return super()._file_read(fname)
- def _store_file_read(self, fname, bin_size=False):
+ def _store_file_read(self, fname):
storage = fname.partition('://')[0]
raise NotImplementedError(
'No implementation for %s' % (storage,)
@@ -202,16 +202,15 @@ def _store_file_delete(self, fname):
)
@api.model
- def _file_write(self, value, checksum):
+ def _file_write(self, bin_data, checksum):
location = self.env.context.get('storage_location') or self._storage()
if location in self._get_stores():
- bin_data = base64.b64decode(value)
key = self.env.context.get('force_storage_key')
if not key:
key = self._compute_checksum(bin_data)
filename = self._store_file_write(key, bin_data)
else:
- filename = super()._file_write(value, checksum)
+ filename = super()._file_write(bin_data, checksum)
return filename
@api.model
From 5d2850961e666dd2e6f10ced810f9be51100163c Mon Sep 17 00:00:00 2001
From: Don Kendall
Date: Wed, 4 Nov 2020 09:39:06 -0500
Subject: [PATCH 22/47] remove base64 from base_attachment
---
base_attachment_object_storage/models/ir_attachment.py | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/base_attachment_object_storage/models/ir_attachment.py b/base_attachment_object_storage/models/ir_attachment.py
index cd26db6527..ea9283f78f 100644
--- a/base_attachment_object_storage/models/ir_attachment.py
+++ b/base_attachment_object_storage/models/ir_attachment.py
@@ -1,7 +1,6 @@
# Copyright 2017-2019 Camptocamp SA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
-import base64
import inspect
import logging
import os
@@ -157,7 +156,7 @@ def _store_in_db_instead_of_object_storage(self, data, mimetype):
if mimetype.startswith(mimetype_key):
if not limit:
return True
- bin_data = base64.b64decode(data) if data else b''
+ bin_data = data
return len(bin_data) <= limit
return False
@@ -166,7 +165,7 @@ def _get_datas_related_values(self, data, mimetype):
if data and storage in self._get_stores():
if self._store_in_db_instead_of_object_storage(data, mimetype):
# compute the fields that depend on datas
- bin_data = base64.b64decode(data) if data else b''
+ bin_data = data
values = {
'file_size': len(bin_data),
'checksum': self._compute_checksum(bin_data),
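
The removal of the ``base64`` decoding above relies on the Odoo 14 binary pipeline handing raw bytes (``raw``) to these helpers instead of the base64-encoded ``datas``. A minimal sketch of the difference, assuming an ``odoo shell`` session with ``env`` available::

    import base64

    content = b"hello world"
    # `raw` takes and returns the bytes directly; `datas` is the base64 view.
    attachment = env["ir.attachment"].create({"name": "demo.txt", "raw": content})
    assert attachment.raw == content
    assert attachment.datas == base64.b64encode(content)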
From e81636c21db75e8557d29bab2618874a393c0afc Mon Sep 17 00:00:00 2001
From: Denis Leemann
Date: Mon, 18 Oct 2021 12:50:19 +0200
Subject: [PATCH 23/47] 15.0 Modules migration
---
base_attachment_object_storage/__manifest__.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/base_attachment_object_storage/__manifest__.py b/base_attachment_object_storage/__manifest__.py
index e410519498..3f073b0f77 100644
--- a/base_attachment_object_storage/__manifest__.py
+++ b/base_attachment_object_storage/__manifest__.py
@@ -1,10 +1,10 @@
-# Copyright 2017-2019 Camptocamp SA
+# Copyright 2017-2021 Camptocamp SA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
{'name': 'Base Attachment Object Store',
'summary': 'Base module for the implementation of external object store.',
- 'version': "14.0.1.0.0",
+ 'version': "15.0.1.0.0",
'author': 'Camptocamp,Odoo Community Association (OCA)',
'license': 'AGPL-3',
'category': 'Knowledge Management',
From d3cdfd4f4cce05e894fe303e7a7ad118f56c214f Mon Sep 17 00:00:00 2001
From: Denis Leemann
Date: Mon, 18 Oct 2021 12:57:19 +0200
Subject: [PATCH 24/47] Update manifest files to be consistent between them
The main goal is to be able to easily run grep and sed when we do mass updates
on them
---
.../__manifest__.py | 22 +++++++++----------
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/base_attachment_object_storage/__manifest__.py b/base_attachment_object_storage/__manifest__.py
index 3f073b0f77..9605e9bafc 100644
--- a/base_attachment_object_storage/__manifest__.py
+++ b/base_attachment_object_storage/__manifest__.py
@@ -2,15 +2,15 @@
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
-{'name': 'Base Attachment Object Store',
- 'summary': 'Base module for the implementation of external object store.',
- 'version': "15.0.1.0.0",
- 'author': 'Camptocamp,Odoo Community Association (OCA)',
- 'license': 'AGPL-3',
- 'category': 'Knowledge Management',
- 'depends': ['base'],
- 'website': 'http://www.camptocamp.com',
- 'data': ['data/res_config_settings_data.xml'],
- 'installable': True,
- 'auto_install': True,
+{"name": "Base Attachment Object Store",
+ "summary": "Base module for the implementation of external object store.",
+ "version": "15.0.1.0.0",
+ "author": "Camptocamp,Odoo Community Association (OCA)",
+ "license": "AGPL-3",
+ "category": "Knowledge Management",
+ "depends": ["base"],
+ "website": "http://www.camptocamp.com",
+ "data": ["data/res_config_settings_data.xml"],
+ "installable": True,
+ "auto_install": True,
}
From cb6eb96e6a56d260408faf5fcb0e7e51b0167e5d Mon Sep 17 00:00:00 2001
From: Stephane Mangin
Date: Wed, 13 Apr 2022 12:43:20 +0200
Subject: [PATCH 25/47] Object Storage - inactive mode
---
base_attachment_object_storage/README.rst | 8 ++++-
.../models/ir_attachment.py | 31 ++++++++++++++++++-
2 files changed, 37 insertions(+), 2 deletions(-)
diff --git a/base_attachment_object_storage/README.rst b/base_attachment_object_storage/README.rst
index 176459f554..48dfdd32d8 100644
--- a/base_attachment_object_storage/README.rst
+++ b/base_attachment_object_storage/README.rst
@@ -1,7 +1,7 @@
Base class for attachments on external object store
===================================================
-This is a base addon that regroup common code used by addons targeting specific object store
+This is a base addon that regroup common code used by addons targeting specific object store
Configuration
-------------
@@ -38,3 +38,9 @@ Default configuration means:
stored in database
* application/javascript are stored in database whatever their size
* text/css are stored in database whatever their size
+
+Inactivate attachment storage I/O
+---------------------------------
+
+Define an environment variable `ATTACHMENT_STORAGE_INACTIVE` set to `1`.
+This will prevent any kind of exception and any read/write on storage attachments.
diff --git a/base_attachment_object_storage/models/ir_attachment.py b/base_attachment_object_storage/models/ir_attachment.py
index ea9283f78f..8813ea445b 100644
--- a/base_attachment_object_storage/models/ir_attachment.py
+++ b/base_attachment_object_storage/models/ir_attachment.py
@@ -5,6 +5,7 @@
import logging
import os
import time
+from distutils.util import strtobool
import psycopg2
import odoo
@@ -18,6 +19,10 @@
_logger = logging.getLogger(__name__)
+def is_true(strval):
+ return bool(strtobool(strval or '0'))
+
+
def clean_fs(files):
_logger.info('cleaning old files from filestore')
for full_path in files:
@@ -40,6 +45,18 @@ def clean_fs(files):
class IrAttachment(models.Model):
_inherit = 'ir.attachment'
+ @staticmethod
+ def is_storage_inactive(storage=None, log=True):
+ msg = _("Storages are inactive (see environment configuration).")
+ if storage:
+ msg = _(
+ "Storage '%s' is inactive (see environment configuration)."
+ ) % (storage,)
+ is_inactive = is_true(os.environ.get("ATTACHMENT_STORAGE_INACTIVE"))
+ if is_inactive and log:
+ _logger.warning(msg)
+ return is_inactive
+
def _register_hook(self):
super()._register_hook()
location = self.env.context.get('storage_location') or self._storage()
@@ -151,6 +168,8 @@ def _store_in_db_instead_of_object_storage(self, data, mimetype):
``_store_in_db_instead_of_object_storage_domain``.
"""
+ if self.is_storage_inactive():
+ return True
storage_config = self._get_storage_force_db_config()
for mimetype_key, limit in storage_config.items():
if mimetype.startswith(mimetype_key):
@@ -190,8 +209,9 @@ def _store_file_read(self, fname):
)
def _store_file_write(self, key, bin_data):
+ storage = self.storage()
raise NotImplementedError(
- 'No implementation for %s' % (self.storage(),)
+ 'No implementation for %s' % (storage,)
)
def _store_file_delete(self, fname):
@@ -229,6 +249,8 @@ def _file_delete(self, fname):
@api.model
def _is_file_from_a_store(self, fname):
for store_name in self._get_stores():
+ if self.is_storage_inactive(store_name):
+ continue
uri = '{}://'.format(store_name)
if fname.startswith(uri):
return True
@@ -263,6 +285,9 @@ def _move_attachment_to_store(self):
self.ensure_one()
_logger.info('inspecting attachment %s (%d)', self.name, self.id)
fname = self.store_fname
+ storage = fname.partition('://')[0]
+ if self.is_storage_inactive(storage):
+ fname = False
if fname:
# migrating from filesystem filestore
# or from the old 'store_fname' without the bucket name
@@ -305,6 +330,8 @@ def force_storage_to_db_for_special_fields(self, new_cr=False):
It is not called anywhere, but can be called by RPC or scripts.
"""
storage = self._storage()
+ if self.is_storage_inactive(storage):
+ return
if storage not in self._get_stores():
return
@@ -358,6 +385,8 @@ def force_storage_to_db_for_special_fields(self, new_cr=False):
def _force_storage_to_object_storage(self, new_cr=False):
_logger.info('migrating files to the object storage')
storage = self.env.context.get('storage_location') or self._storage()
+ if self.is_storage_inactive(storage):
+ return
# The weird "res_field = False OR res_field != False" domain
# is required! It's because of an override of _search in ir.attachment
# which adds ('res_field', '=', False) when the domain does not
From f1602562104e5f902297a86f52b3b4b6b9f95da5 Mon Sep 17 00:00:00 2001
From: Stephane Mangin
Date: Mon, 9 May 2022 12:50:49 +0200
Subject: [PATCH 26/47] Object storage inactivation: changes INACTIVE concept
for DISABLE
---
base_attachment_object_storage/README.rst | 6 ++---
.../models/ir_attachment.py | 22 +++++++++----------
2 files changed, 14 insertions(+), 14 deletions(-)
diff --git a/base_attachment_object_storage/README.rst b/base_attachment_object_storage/README.rst
index 48dfdd32d8..0ff25c997c 100644
--- a/base_attachment_object_storage/README.rst
+++ b/base_attachment_object_storage/README.rst
@@ -39,8 +39,8 @@ Default configuration means:
* application/javascript are stored in database whatever their size
* text/css are stored in database whatever their size
-Inactivate attachment storage I/O
----------------------------------
+Disable attachment storage I/O
+------------------------------
-Define an environment variable `ATTACHMENT_STORAGE_INACTIVE` set to `1`.
+Define an environment variable `DISABLE_ATTACHMENT_STORAGE` set to `1`.
This will prevent any kind of exception and any read/write on storage attachments.
diff --git a/base_attachment_object_storage/models/ir_attachment.py b/base_attachment_object_storage/models/ir_attachment.py
index 8813ea445b..ed43c69794 100644
--- a/base_attachment_object_storage/models/ir_attachment.py
+++ b/base_attachment_object_storage/models/ir_attachment.py
@@ -46,16 +46,16 @@ class IrAttachment(models.Model):
_inherit = 'ir.attachment'
@staticmethod
- def is_storage_inactive(storage=None, log=True):
- msg = _("Storages are inactive (see environment configuration).")
+ def is_storage_disabled(storage=None, log=True):
+ msg = _("Storages are disabled (see environment configuration).")
if storage:
msg = _(
- "Storage '%s' is inactive (see environment configuration)."
+ "Storage '%s' is disabled (see environment configuration)."
) % (storage,)
- is_inactive = is_true(os.environ.get("ATTACHMENT_STORAGE_INACTIVE"))
- if is_inactive and log:
+ is_disabled = is_true(os.environ.get("DISABLE_ATTACHMENT_STORAGE"))
+ if is_disabled and log:
_logger.warning(msg)
- return is_inactive
+ return is_disabled
def _register_hook(self):
super()._register_hook()
@@ -168,7 +168,7 @@ def _store_in_db_instead_of_object_storage(self, data, mimetype):
``_store_in_db_instead_of_object_storage_domain``.
"""
- if self.is_storage_inactive():
+ if self.is_storage_disabled():
return True
storage_config = self._get_storage_force_db_config()
for mimetype_key, limit in storage_config.items():
@@ -249,7 +249,7 @@ def _file_delete(self, fname):
@api.model
def _is_file_from_a_store(self, fname):
for store_name in self._get_stores():
- if self.is_storage_inactive(store_name):
+ if self.is_storage_disabled(store_name):
continue
uri = '{}://'.format(store_name)
if fname.startswith(uri):
@@ -286,7 +286,7 @@ def _move_attachment_to_store(self):
_logger.info('inspecting attachment %s (%d)', self.name, self.id)
fname = self.store_fname
storage = fname.partition('://')[0]
- if self.is_storage_inactive(storage):
+ if self.is_storage_disabled(storage):
fname = False
if fname:
# migrating from filesystem filestore
@@ -330,7 +330,7 @@ def force_storage_to_db_for_special_fields(self, new_cr=False):
It is not called anywhere, but can be called by RPC or scripts.
"""
storage = self._storage()
- if self.is_storage_inactive(storage):
+ if self.is_storage_disabled(storage):
return
if storage not in self._get_stores():
return
@@ -385,7 +385,7 @@ def force_storage_to_db_for_special_fields(self, new_cr=False):
def _force_storage_to_object_storage(self, new_cr=False):
_logger.info('migrating files to the object storage')
storage = self.env.context.get('storage_location') or self._storage()
- if self.is_storage_inactive(storage):
+ if self.is_storage_disabled(storage):
return
# The weird "res_field = False OR res_field != False" domain
# is required! It's because of an override of _search in ir.attachment
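
The ``DISABLE_ATTACHMENT_STORAGE`` kill switch used above simply short-circuits every storage entry point. A standalone sketch of the check (a simplified stand-in for the module's ``is_true``/``is_storage_disabled`` helpers, not the module code itself)::

    import os

    def is_storage_disabled():
        # simplified mirror of is_true(os.environ.get("DISABLE_ATTACHMENT_STORAGE"))
        return str(os.environ.get("DISABLE_ATTACHMENT_STORAGE") or "0").lower() in (
            "y", "yes", "t", "true", "on", "1",
        )

    os.environ["DISABLE_ATTACHMENT_STORAGE"] = "1"
    # With the variable set, reads/writes/migrations on object storages return early.
    print(is_storage_disabled())  # True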
From 660827ff9937f2d4b6491f867500d6bb6b344d2b Mon Sep 17 00:00:00 2001
From: vrenaville
Date: Mon, 26 Sep 2022 10:12:15 +0200
Subject: [PATCH 27/47] feat: v16.0 : all modules uninstallable
---
.../__manifest__.py | 25 ++++++++++---------
1 file changed, 13 insertions(+), 12 deletions(-)
diff --git a/base_attachment_object_storage/__manifest__.py b/base_attachment_object_storage/__manifest__.py
index 9605e9bafc..fbcac49088 100644
--- a/base_attachment_object_storage/__manifest__.py
+++ b/base_attachment_object_storage/__manifest__.py
@@ -2,15 +2,16 @@
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
-{"name": "Base Attachment Object Store",
- "summary": "Base module for the implementation of external object store.",
- "version": "15.0.1.0.0",
- "author": "Camptocamp,Odoo Community Association (OCA)",
- "license": "AGPL-3",
- "category": "Knowledge Management",
- "depends": ["base"],
- "website": "http://www.camptocamp.com",
- "data": ["data/res_config_settings_data.xml"],
- "installable": True,
- "auto_install": True,
- }
+{
+ "name": "Base Attachment Object Store",
+ "summary": "Base module for the implementation of external object store.",
+ "version": "15.0.1.0.0",
+ "author": "Camptocamp,Odoo Community Association (OCA)",
+ "license": "AGPL-3",
+ "category": "Knowledge Management",
+ "depends": ["base"],
+ "website": "http://www.camptocamp.com",
+ "data": ["data/res_config_settings_data.xml"],
+ "installable": False,
+ "auto_install": True,
+}
From b675970d7ae56e1a469a3c21cb14dc6b05a58687 Mon Sep 17 00:00:00 2001
From: Vincent Renaville
Date: Mon, 26 Sep 2022 11:31:17 +0200
Subject: [PATCH 28/47] fix: modification setup (#386)
---
base_attachment_object_storage/__manifest__.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/base_attachment_object_storage/__manifest__.py b/base_attachment_object_storage/__manifest__.py
index fbcac49088..39cc63aab3 100644
--- a/base_attachment_object_storage/__manifest__.py
+++ b/base_attachment_object_storage/__manifest__.py
@@ -5,13 +5,13 @@
{
"name": "Base Attachment Object Store",
"summary": "Base module for the implementation of external object store.",
- "version": "15.0.1.0.0",
+ "version": "16.0.1.0.0",
"author": "Camptocamp,Odoo Community Association (OCA)",
"license": "AGPL-3",
"category": "Knowledge Management",
"depends": ["base"],
"website": "http://www.camptocamp.com",
"data": ["data/res_config_settings_data.xml"],
- "installable": False,
+ "installable": True,
"auto_install": True,
}
From 46d3141d42dfb082fc230f143d2d8c33a00b4d89 Mon Sep 17 00:00:00 2001
From: Vincent Renaville
Date: Fri, 4 Nov 2022 14:34:29 +0100
Subject: [PATCH 29/47] fix: dependencies and deprecated code (#390)
---
.../models/ir_attachment.py | 2 +-
.../models/strtobool.py | 21 +++++++++++++++++++
2 files changed, 22 insertions(+), 1 deletion(-)
create mode 100644 base_attachment_object_storage/models/strtobool.py
diff --git a/base_attachment_object_storage/models/ir_attachment.py b/base_attachment_object_storage/models/ir_attachment.py
index ed43c69794..425bb9e4a2 100644
--- a/base_attachment_object_storage/models/ir_attachment.py
+++ b/base_attachment_object_storage/models/ir_attachment.py
@@ -5,7 +5,7 @@
import logging
import os
import time
-from distutils.util import strtobool
+from .strtobool import strtobool
import psycopg2
import odoo
diff --git a/base_attachment_object_storage/models/strtobool.py b/base_attachment_object_storage/models/strtobool.py
new file mode 100644
index 0000000000..44d1eb2cc3
--- /dev/null
+++ b/base_attachment_object_storage/models/strtobool.py
@@ -0,0 +1,21 @@
+_MAP = {
+ 'y': True,
+ 'yes': True,
+ 't': True,
+ 'true': True,
+ 'on': True,
+ '1': True,
+ 'n': False,
+ 'no': False,
+ 'f': False,
+ 'false': False,
+ 'off': False,
+ '0': False
+}
+
+
+def strtobool(value):
+ try:
+ return _MAP[str(value).lower()]
+ except KeyError:
+ raise ValueError('"{}" is not a valid bool value'.format(value))
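
The vendored helper above replaces ``distutils.util.strtobool`` (deprecated and removed in recent Python versions) and returns real booleans. A quick usage sketch, assuming the addon is importable through Odoo's addon namespace::

    from odoo.addons.base_attachment_object_storage.models.strtobool import strtobool

    assert strtobool("yes") is True
    assert strtobool("0") is False
    try:
        strtobool("maybe")
    except ValueError as exc:
        print(exc)  # "maybe" is not a valid bool value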
From 76e81dc5d3138ba5fb613be7b0290a96f5e36ce1 Mon Sep 17 00:00:00 2001
From: Vincent Renaville
Date: Fri, 11 Nov 2022 15:17:03 +0100
Subject: [PATCH 30/47] feat: remove after method (#393)
* fix: azure reading in stream monkey patch documents
---
.../models/ir_attachment.py | 206 +++++++++---------
1 file changed, 107 insertions(+), 99 deletions(-)
diff --git a/base_attachment_object_storage/models/ir_attachment.py b/base_attachment_object_storage/models/ir_attachment.py
index 425bb9e4a2..a4e3ec9d98 100644
--- a/base_attachment_object_storage/models/ir_attachment.py
+++ b/base_attachment_object_storage/models/ir_attachment.py
@@ -20,38 +20,36 @@
def is_true(strval):
- return bool(strtobool(strval or '0'))
+ return bool(strtobool(strval or "0"))
def clean_fs(files):
- _logger.info('cleaning old files from filestore')
+ _logger.info("cleaning old files from filestore")
for full_path in files:
if os.path.exists(full_path):
try:
os.unlink(full_path)
except OSError:
_logger.info(
- "_file_delete could not unlink %s",
- full_path, exc_info=True
+ "_file_delete could not unlink %s", full_path, exc_info=True
)
except IOError:
# Harmless and needed for race conditions
_logger.info(
- "_file_delete could not unlink %s",
- full_path, exc_info=True
+ "_file_delete could not unlink %s", full_path, exc_info=True
)
class IrAttachment(models.Model):
- _inherit = 'ir.attachment'
+ _inherit = "ir.attachment"
@staticmethod
def is_storage_disabled(storage=None, log=True):
msg = _("Storages are disabled (see environment configuration).")
if storage:
- msg = _(
- "Storage '%s' is disabled (see environment configuration)."
- ) % (storage,)
+ msg = _("Storage '%s' is disabled (see environment configuration).") % (
+ storage,
+ )
is_disabled = is_true(os.environ.get("DISABLE_ATTACHMENT_STORAGE"))
if is_disabled and log:
_logger.warning(msg)
@@ -59,7 +57,7 @@ def is_storage_disabled(storage=None, log=True):
def _register_hook(self):
super()._register_hook()
- location = self.env.context.get('storage_location') or self._storage()
+ location = self.env.context.get("storage_location") or self._storage()
# ignore if we are not using an object storage
if location not in self._get_stores():
return
@@ -73,7 +71,7 @@ def _register_hook(self):
# done during the initialization. We need to move the attachments that
# could have been created or updated in other addons before this addon
# was loaded
- update_module = load_modules_frame.f_locals.get('update_module')
+ update_module = load_modules_frame.f_locals.get("update_module")
# We need to call the migration on the loading of the model because
# when we are upgrading addons, some of them might add attachments.
@@ -82,15 +80,19 @@ def _register_hook(self):
# Typical example is images of ir.ui.menu which are updated in
# ir.attachment at every upgrade of the addons
if update_module:
- self.env['ir.attachment'].sudo()._force_storage_to_object_storage()
+ self.env["ir.attachment"].sudo()._force_storage_to_object_storage()
@property
def _object_storage_default_force_db_config(self):
return {"image/": 51200, "application/javascript": 0, "text/css": 0}
def _get_storage_force_db_config(self):
- param = self.env['ir.config_parameter'].sudo().get_param(
- 'ir_attachment.storage.force.database',
+ param = (
+ self.env["ir.config_parameter"]
+ .sudo()
+ .get_param(
+ "ir_attachment.storage.force.database",
+ )
)
storage_config = None
if param:
@@ -100,7 +102,8 @@ def _get_storage_force_db_config(self):
_logger.exception(
"Could not parse system parameter"
" 'ir_attachment.storage.force.database', reverting to the"
- " default configuration.")
+ " default configuration."
+ )
if not storage_config:
storage_config = self._object_storage_default_force_db_config
@@ -128,7 +131,7 @@ def _store_in_db_instead_of_object_storage_domain(self):
return domain
def _store_in_db_instead_of_object_storage(self, data, mimetype):
- """ Return whether an attachment must be stored in db
+ """Return whether an attachment must be stored in db
When we are using an Object Storage. This is sometimes required
because the object storage is slower than the database/filesystem.
@@ -180,17 +183,17 @@ def _store_in_db_instead_of_object_storage(self, data, mimetype):
return False
def _get_datas_related_values(self, data, mimetype):
- storage = self.env.context.get('storage_location') or self._storage()
+ storage = self.env.context.get("storage_location") or self._storage()
if data and storage in self._get_stores():
if self._store_in_db_instead_of_object_storage(data, mimetype):
# compute the fields that depend on datas
bin_data = data
values = {
- 'file_size': len(bin_data),
- 'checksum': self._compute_checksum(bin_data),
- 'index_content': self._index(bin_data, mimetype),
- 'store_fname': False,
- 'db_datas': data,
+ "file_size": len(bin_data),
+ "checksum": self._compute_checksum(bin_data),
+ "index_content": self._index(bin_data, mimetype),
+ "store_fname": False,
+ "db_datas": data,
}
return values
return super()._get_datas_related_values(data, mimetype)
@@ -203,28 +206,22 @@ def _file_read(self, fname):
return super()._file_read(fname)
def _store_file_read(self, fname):
- storage = fname.partition('://')[0]
- raise NotImplementedError(
- 'No implementation for %s' % (storage,)
- )
+ storage = fname.partition("://")[0]
+ raise NotImplementedError("No implementation for %s" % (storage,))
def _store_file_write(self, key, bin_data):
storage = self.storage()
- raise NotImplementedError(
- 'No implementation for %s' % (storage,)
- )
+ raise NotImplementedError("No implementation for %s" % (storage,))
def _store_file_delete(self, fname):
- storage = fname.partition('://')[0]
- raise NotImplementedError(
- 'No implementation for %s' % (storage,)
- )
+ storage = fname.partition("://")[0]
+ raise NotImplementedError("No implementation for %s" % (storage,))
@api.model
def _file_write(self, bin_data, checksum):
- location = self.env.context.get('storage_location') or self._storage()
+ location = self.env.context.get("storage_location") or self._storage()
if location in self._get_stores():
- key = self.env.context.get('force_storage_key')
+ key = self.env.context.get("force_storage_key")
if not key:
key = self._compute_checksum(bin_data)
filename = self._store_file_write(key, bin_data)
@@ -238,8 +235,9 @@ def _file_delete(self, fname):
cr = self.env.cr
# using SQL to include files hidden through unlink or due to record
# rules
- cr.execute("SELECT COUNT(*) FROM ir_attachment "
- "WHERE store_fname = %s", (fname,))
+ cr.execute(
+ "SELECT COUNT(*) FROM ir_attachment " "WHERE store_fname = %s", (fname,)
+ )
count = cr.fetchone()[0]
if not count:
self._store_file_delete(fname)
@@ -251,22 +249,20 @@ def _is_file_from_a_store(self, fname):
for store_name in self._get_stores():
if self.is_storage_disabled(store_name):
continue
- uri = '{}://'.format(store_name)
+ uri = "{}://".format(store_name)
if fname.startswith(uri):
return True
return False
@contextmanager
def do_in_new_env(self, new_cr=False):
- """ Context manager that yields a new environment
+ """Context manager that yields a new environment
Using a new Odoo Environment thus a new PG transaction.
"""
with api.Environment.manage():
if new_cr:
- registry = odoo.modules.registry.Registry.new(
- self.env.cr.dbname
- )
+ registry = odoo.modules.registry.Registry.new(self.env.cr.dbname)
with closing(registry.cursor()) as cr:
try:
yield self.env(cr=cr)
@@ -283,33 +279,38 @@ def do_in_new_env(self, new_cr=False):
def _move_attachment_to_store(self):
self.ensure_one()
- _logger.info('inspecting attachment %s (%d)', self.name, self.id)
+ _logger.info("inspecting attachment %s (%d)", self.name, self.id)
fname = self.store_fname
- storage = fname.partition('://')[0]
+ storage = fname.partition("://")[0]
if self.is_storage_disabled(storage):
fname = False
if fname:
# migrating from filesystem filestore
# or from the old 'store_fname' without the bucket name
- _logger.info('moving %s on the object storage', fname)
- self.write({'datas': self.datas,
- # this is required otherwise the
- # mimetype gets overriden with
- # 'application/octet-stream'
- # on assets
- 'mimetype': self.mimetype})
- _logger.info('moved %s on the object storage', fname)
+ _logger.info("moving %s on the object storage", fname)
+ self.write(
+ {
+ "datas": self.datas,
+ # this is required otherwise the
+ # mimetype gets overriden with
+ # 'application/octet-stream'
+ # on assets
+ "mimetype": self.mimetype,
+ }
+ )
+ _logger.info("moved %s on the object storage", fname)
return self._full_path(fname)
elif self.db_datas:
- _logger.info('moving on the object storage from database')
- self.write({'datas': self.datas})
+ _logger.info("moving on the object storage from database")
+ self.write({"datas": self.datas})
@api.model
def force_storage(self):
- if not self.env['res.users'].browse(self.env.uid)._is_admin():
+ if not self.env["res.users"].browse(self.env.uid)._is_admin():
raise exceptions.AccessError(
- _('Only administrators can execute this action.'))
- location = self.env.context.get('storage_location') or self._storage()
+ _("Only administrators can execute this action.")
+ )
+ location = self.env.context.get("storage_location") or self._storage()
if location not in self._get_stores():
return super().force_storage()
self._force_storage_to_object_storage()
@@ -335,30 +336,32 @@ def force_storage_to_db_for_special_fields(self, new_cr=False):
if storage not in self._get_stores():
return
- domain = AND((
- normalize_domain(
- [('store_fname', '=like', '{}://%'.format(storage)),
- # for res_field, see comment in
- # _force_storage_to_object_storage
- '|',
- ('res_field', '=', False),
- ('res_field', '!=', False),
- ]
- ),
- normalize_domain(self._store_in_db_instead_of_object_storage_domain())
- ))
+ domain = AND(
+ (
+ normalize_domain(
+ [
+ ("store_fname", "=like", "{}://%".format(storage)),
+ # for res_field, see comment in
+ # _force_storage_to_object_storage
+ "|",
+ ("res_field", "=", False),
+ ("res_field", "!=", False),
+ ]
+ ),
+ normalize_domain(self._store_in_db_instead_of_object_storage_domain()),
+ )
+ )
with self.do_in_new_env(new_cr=new_cr) as new_env:
- model_env = new_env['ir.attachment'].with_context(
- prefetch_fields=False
- )
+ model_env = new_env["ir.attachment"].with_context(prefetch_fields=False)
attachment_ids = model_env.search(domain).ids
if not attachment_ids:
return
total = len(attachment_ids)
start_time = time.time()
- _logger.info('Moving %d attachments from %s to'
- ' DB for fast access', total, storage)
+ _logger.info(
+ "Moving %d attachments from %s to" " DB for fast access", total, storage
+ )
current = 0
for attachment_id in attachment_ids:
current += 1
@@ -370,21 +373,22 @@ def force_storage_to_db_for_special_fields(self, new_cr=False):
# this write will read the datas from the Object Storage and
# write them back in the DB (the logic for location to write is
# in the 'datas' inverse computed field)
- attachment.write({'datas': attachment.datas})
+ attachment.write({"datas": attachment.datas})
# as the file will potentially be dropped on the bucket,
# we should commit the changes here
new_env.cr.commit()
if current % 100 == 0 or total - current == 0:
_logger.info(
- 'attachment %s/%s after %.2fs',
- current, total,
- time.time() - start_time
+ "attachment %s/%s after %.2fs",
+ current,
+ total,
+ time.time() - start_time,
)
@api.model
def _force_storage_to_object_storage(self, new_cr=False):
- _logger.info('migrating files to the object storage')
- storage = self.env.context.get('storage_location') or self._storage()
+ _logger.info("migrating files to the object storage")
+ storage = self.env.context.get("storage_location") or self._storage()
if self.is_storage_disabled(storage):
return
# The weird "res_field = False OR res_field != False" domain
@@ -392,16 +396,19 @@ def _force_storage_to_object_storage(self, new_cr=False):
# which adds ('res_field', '=', False) when the domain does not
# contain 'res_field'.
# https://github.com/odoo/odoo/blob/9032617120138848c63b3cfa5d1913c5e5ad76db/odoo/addons/base/ir/ir_attachment.py#L344-L347
- domain = ['!', ('store_fname', '=like', '{}://%'.format(storage)),
- '|',
- ('res_field', '=', False),
- ('res_field', '!=', False)]
+ domain = [
+ "!",
+ ("store_fname", "=like", "{}://%".format(storage)),
+ "|",
+ ("res_field", "=", False),
+ ("res_field", "!=", False),
+ ]
# We do a copy of the environment so we can workaround the cache issue
# below. We do not create a new cursor by default because it causes
# serialization issues due to concurrent updates on attachments during
# the installation
with self.do_in_new_env(new_cr=new_cr) as new_env:
- model_env = new_env['ir.attachment']
+ model_env = new_env["ir.attachment"]
ids = model_env.search(domain).ids
files_to_clean = []
for attachment_id in ids:
@@ -410,12 +417,14 @@ def _force_storage_to_object_storage(self, new_cr=False):
# check that no other transaction has
# locked the row, don't send a file to storage
# in that case
- self.env.cr.execute("SELECT id "
- "FROM ir_attachment "
- "WHERE id = %s "
- "FOR UPDATE NOWAIT",
- (attachment_id,),
- log_exceptions=False)
+ self.env.cr.execute(
+ "SELECT id "
+ "FROM ir_attachment "
+ "WHERE id = %s "
+ "FOR UPDATE NOWAIT",
+ (attachment_id,),
+ log_exceptions=False,
+ )
# This is a trick to avoid having the 'datas'
# function fields computed for every attachment on
@@ -428,17 +437,16 @@ def _force_storage_to_object_storage(self, new_cr=False):
if path:
files_to_clean.append(path)
except psycopg2.OperationalError:
- _logger.error('Could not migrate attachment %s to S3',
- attachment_id)
-
- def clean():
- clean_fs(files_to_clean)
+ _logger.error(
+ "Could not migrate attachment %s to S3", attachment_id
+ )
# delete the files from the filesystem once we know the changes
# have been committed in ir.attachment
if files_to_clean:
- new_env.cr.after('commit', clean)
+ new_env.cr.commit()
+ clean_fs(files_to_clean)
def _get_stores(self):
- """ To get the list of stores activated in the system """
+ """To get the list of stores activated in the system"""
return []
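
At this stage the addon still delegates the actual I/O to a concrete store module. A hypothetical minimal implementation of the expected hooks (the ``my_store`` code and the in-memory dict are illustrative only, not part of the addon)::

    from odoo import models

    _FAKE_BUCKET = {}  # stand-in for a real object store client


    class IrAttachment(models.Model):
        _inherit = "ir.attachment"

        def _get_stores(self):
            # advertise the store so fnames like "my_store://<key>" are handled here
            return ["my_store"] + super()._get_stores()

        def _store_file_write(self, key, bin_data):
            _FAKE_BUCKET[key] = bin_data
            return "my_store://{}".format(key)

        def _store_file_read(self, fname):
            return _FAKE_BUCKET[fname.partition("://")[2]]

        def _store_file_delete(self, fname):
            _FAKE_BUCKET.pop(fname.partition("://")[2], None)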
From 8ca92957d01a4ad3e399915b7e7a5e6818e69a99 Mon Sep 17 00:00:00 2001
From: "Laurent Mignon (ACSONE)"
Date: Fri, 7 Apr 2023 18:20:03 +0200
Subject: [PATCH 31/47] [ADD] fs_attachment: Store attachment through fsspec
---
.../data/res_config_settings_data.xml | 9 -
.../models/strtobool.py | 21 ---
.../README.rst | 0
.../__init__.py | 0
.../__manifest__.py | 10 +-
.../data/res_config_settings_data.xml | 11 ++
.../models/__init__.py | 0
.../models/ir_attachment.py | 175 +++++++++++++-----
fs_attachment/models/strtobool.py | 21 +++
fs_attachment/readme/CONTRIBUTORS.rst | 11 ++
fs_attachment/readme/DESCRIPTION.rst | 1 +
fs_attachment/readme/USAGE.rst | 41 ++++
fs_attachment/tests/__init__.py | 1 +
fs_attachment/tests/test_fs_attachment.py | 88 +++++++++
setup/fs_attachment/odoo/addons/fs_attachment | 1 +
setup/fs_attachment/setup.py | 6 +
test-requirements.txt | 1 +
17 files changed, 313 insertions(+), 84 deletions(-)
delete mode 100644 base_attachment_object_storage/data/res_config_settings_data.xml
delete mode 100644 base_attachment_object_storage/models/strtobool.py
rename {base_attachment_object_storage => fs_attachment}/README.rst (100%)
rename {base_attachment_object_storage => fs_attachment}/__init__.py (100%)
rename {base_attachment_object_storage => fs_attachment}/__manifest__.py (57%)
create mode 100644 fs_attachment/data/res_config_settings_data.xml
rename {base_attachment_object_storage => fs_attachment}/models/__init__.py (100%)
rename {base_attachment_object_storage => fs_attachment}/models/ir_attachment.py (77%)
create mode 100644 fs_attachment/models/strtobool.py
create mode 100644 fs_attachment/readme/CONTRIBUTORS.rst
create mode 100644 fs_attachment/readme/DESCRIPTION.rst
create mode 100644 fs_attachment/readme/USAGE.rst
create mode 100644 fs_attachment/tests/__init__.py
create mode 100644 fs_attachment/tests/test_fs_attachment.py
create mode 120000 setup/fs_attachment/odoo/addons/fs_attachment
create mode 100644 setup/fs_attachment/setup.py
diff --git a/base_attachment_object_storage/data/res_config_settings_data.xml b/base_attachment_object_storage/data/res_config_settings_data.xml
deleted file mode 100644
index 76c6961d93..0000000000
--- a/base_attachment_object_storage/data/res_config_settings_data.xml
+++ /dev/null
@@ -1,9 +0,0 @@
-
-
-
-
- ir_attachment.storage.force.database
- {"image/": 51200, "application/javascript": 0, "text/css": 0}
-
-
-
diff --git a/base_attachment_object_storage/models/strtobool.py b/base_attachment_object_storage/models/strtobool.py
deleted file mode 100644
index 44d1eb2cc3..0000000000
--- a/base_attachment_object_storage/models/strtobool.py
+++ /dev/null
@@ -1,21 +0,0 @@
-_MAP = {
- 'y': True,
- 'yes': True,
- 't': True,
- 'true': True,
- 'on': True,
- '1': True,
- 'n': False,
- 'no': False,
- 'f': False,
- 'false': False,
- 'off': False,
- '0': False
-}
-
-
-def strtobool(value):
- try:
- return _MAP[str(value).lower()]
- except KeyError:
- raise ValueError('"{}" is not a valid bool value'.format(value))
diff --git a/base_attachment_object_storage/README.rst b/fs_attachment/README.rst
similarity index 100%
rename from base_attachment_object_storage/README.rst
rename to fs_attachment/README.rst
diff --git a/base_attachment_object_storage/__init__.py b/fs_attachment/__init__.py
similarity index 100%
rename from base_attachment_object_storage/__init__.py
rename to fs_attachment/__init__.py
diff --git a/base_attachment_object_storage/__manifest__.py b/fs_attachment/__manifest__.py
similarity index 57%
rename from base_attachment_object_storage/__manifest__.py
rename to fs_attachment/__manifest__.py
index 39cc63aab3..82a5053e56 100644
--- a/base_attachment_object_storage/__manifest__.py
+++ b/fs_attachment/__manifest__.py
@@ -4,14 +4,14 @@
{
"name": "Base Attachment Object Store",
- "summary": "Base module for the implementation of external object store.",
+ "summary": "Stora attachments on external object store",
"version": "16.0.1.0.0",
- "author": "Camptocamp,Odoo Community Association (OCA)",
+ "author": "Camptocamp, ACSONE SA/NV, Odoo Community Association (OCA)",
"license": "AGPL-3",
"category": "Knowledge Management",
- "depends": ["base"],
- "website": "http://www.camptocamp.com",
+ "depends": ["fs_storage"],
+ "website": "https://github.com/OCA/storage",
"data": ["data/res_config_settings_data.xml"],
"installable": True,
- "auto_install": True,
+ "auto_install": False,
}
diff --git a/fs_attachment/data/res_config_settings_data.xml b/fs_attachment/data/res_config_settings_data.xml
new file mode 100644
index 0000000000..d9609103c5
--- /dev/null
+++ b/fs_attachment/data/res_config_settings_data.xml
@@ -0,0 +1,11 @@
+
+
+
+
+
+
diff --git a/base_attachment_object_storage/models/__init__.py b/fs_attachment/models/__init__.py
similarity index 100%
rename from base_attachment_object_storage/models/__init__.py
rename to fs_attachment/models/__init__.py
diff --git a/base_attachment_object_storage/models/ir_attachment.py b/fs_attachment/models/ir_attachment.py
similarity index 77%
rename from base_attachment_object_storage/models/ir_attachment.py
rename to fs_attachment/models/ir_attachment.py
index a4e3ec9d98..e82a4fab8b 100644
--- a/base_attachment_object_storage/models/ir_attachment.py
+++ b/fs_attachment/models/ir_attachment.py
@@ -1,20 +1,22 @@
-# Copyright 2017-2019 Camptocamp SA
+# Copyright 2017-2023 Camptocamp SA
+# Copyright 2023 ACSONE SA/NV
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
-import inspect
+import io
import logging
import os
import time
-from .strtobool import strtobool
+from contextlib import closing, contextmanager
+import fsspec # pylint: disable=missing-manifest-dependency
import psycopg2
-import odoo
-from contextlib import closing, contextmanager
-from odoo import api, exceptions, models, _
+import odoo
+from odoo import _, api, exceptions, models
from odoo.osv.expression import AND, OR, normalize_domain
from odoo.tools.safe_eval import const_eval
+from .strtobool import strtobool
_logger = logging.getLogger(__name__)
@@ -55,33 +57,6 @@ def is_storage_disabled(storage=None, log=True):
_logger.warning(msg)
return is_disabled
- def _register_hook(self):
- super()._register_hook()
- location = self.env.context.get("storage_location") or self._storage()
- # ignore if we are not using an object storage
- if location not in self._get_stores():
- return
- curframe = inspect.currentframe()
- calframe = inspect.getouterframes(curframe, 2)
- # the caller of _register_hook is 'load_modules' in
- # odoo/modules/loading.py
- load_modules_frame = calframe[1][0]
- # 'update_module' is an argument that 'load_modules' receives with a
- # True-ish value meaning that an install or upgrade of addon has been
- # done during the initialization. We need to move the attachments that
- # could have been created or updated in other addons before this addon
- # was loaded
- update_module = load_modules_frame.f_locals.get("update_module")
-
- # We need to call the migration on the loading of the model because
- # when we are upgrading addons, some of them might add attachments.
- # To be sure they are migrated to the storage we need to call the
- # migration here.
- # Typical example is images of ir.ui.menu which are updated in
- # ir.attachment at every upgrade of the addons
- if update_module:
- self.env["ir.attachment"].sudo()._force_storage_to_object_storage()
-
@property
def _object_storage_default_force_db_config(self):
return {"image/": 51200, "application/javascript": 0, "text/css": 0}
@@ -198,6 +173,7 @@ def _get_datas_related_values(self, data, mimetype):
return values
return super()._get_datas_related_values(data, mimetype)
+ # Odoo methods that we override to use the object storage
@api.model
def _file_read(self, fname):
if self._is_file_from_a_store(fname):
@@ -205,23 +181,13 @@ def _file_read(self, fname):
else:
return super()._file_read(fname)
- def _store_file_read(self, fname):
- storage = fname.partition("://")[0]
- raise NotImplementedError("No implementation for %s" % (storage,))
-
- def _store_file_write(self, key, bin_data):
- storage = self.storage()
- raise NotImplementedError("No implementation for %s" % (storage,))
-
- def _store_file_delete(self, fname):
- storage = fname.partition("://")[0]
- raise NotImplementedError("No implementation for %s" % (storage,))
-
@api.model
def _file_write(self, bin_data, checksum):
location = self.env.context.get("storage_location") or self._storage()
if location in self._get_stores():
- key = self.env.context.get("force_storage_key")
+ key = self.env.context.get("storage_file_path") or self.env.context.get(
+ "force_storage_key"
+ )
if not key:
key = self._compute_checksum(bin_data)
filename = self._store_file_write(key, bin_data)
@@ -230,7 +196,7 @@ def _file_write(self, bin_data, checksum):
return filename
@api.model
- def _file_delete(self, fname):
+ def _file_delete(self, fname) -> None: # pylint: disable=missing-return
if self._is_file_from_a_store(fname):
cr = self.env.cr
# using SQL to include files hidden through unlink or due to record
@@ -244,8 +210,51 @@ def _file_delete(self, fname):
else:
super()._file_delete(fname)
+ # Internal methods to use the object storage
+ @api.model
+ def get_fs_storage_for_code(self, code) -> fsspec.AbstractFileSystem | None:
+ """Return the filesystem for the given storage code"""
+ fs = self.env["fs.storage"].get_fs_by_code(code)
+ if not fs:
+ raise SystemError(f"No Filesystem storage for code {code}")
+ return fs
+
+ @api.model
+ def get_fs_and_path(self, fname) -> tuple[fsspec.AbstractFileSystem, str]:
+ """Return the filesystem and the path for the given fname"""
+ partition = fname.partition("://")
+ storage = partition[0]
+ fs = self.get_fs_storage_for_code(storage)
+ fname = partition[2]
+ return fs, fname
+
+ @api.model
+ def _store_file_read(self, fname: str) -> bytes | None:
+ """Read the file from the filesystem storage"""
+ fs, fname = self.get_fs_and_path(fname)
+ with fs.open(fname, "rb") as fs:
+ return fs.read()
+
+ @api.model
+ def _store_file_write(self, path, bin_data: bytes) -> str:
+ """Write the file to the filesystem storage"""
+ storage = self.env.context.get("storage_location") or self._storage()
+ fs = self.get_fs_storage_for_code(storage)
+ fname = f"{storage}://{path}"
+ with fs.open(path, "wb") as fs:
+ fs.write(bin_data)
+ return fname
+
+ @api.model
+ def _store_file_delete(self, fname):
+ """Delete the file from the filesystem storage"""
+ fs, fname = self.get_fs_and_path(fname)
+ fs.rm(fname)
+
@api.model
def _is_file_from_a_store(self, fname):
+ if not fname:
+ return False
for store_name in self._get_stores():
if self.is_storage_disabled(store_name):
continue
@@ -254,6 +263,73 @@ def _is_file_from_a_store(self, fname):
return True
return False
+ def open(
+ self, mode="rb", block_size=None, cache_options=None, compression=None, **kwargs
+ ) -> io.IOBase:
+ """
+ Return a file-like object from the filesystem storage where the attachment
+ content is stored.
+
+ This method works for all attachments, even if the content is stored in the
+ database or in the Odoo filestore (parameters are ignored in the case
+ of database storage).
+
+ The resultant instance must function correctly in a context ``with``
+ block.
+
+ Parameters
+ ----------
+ path: str
+ Target file
+ mode: str like 'rb', 'w'
+ See builtin ``open()``
+ block_size: int
+ Some indication of buffering - this is a value in bytes
+ cache_options : dict, optional
+ Extra arguments to pass through to the cache.
+ compression: string or None
+ If given, open file using compression codec. Can either be a compression
+ name (a key in ``fsspec.compression.compr``) or "infer" to guess the
+ compression from the filename suffix.
+ encoding, errors, newline: passed on to TextIOWrapper for text mode
+
+ Returns
+ -------
+ A file-like object
+
+ Caution: modifications to the file-like object are not transactional.
+ If you modify the file-like object and the current transaction is rolled
+ back, the changes will be saved to the file and not rolled back.
+ Moreover, modifications to the content will not be reflected in the cache
+ and could lead to a data mismatch when the data is flushed.
+
+ TODO if open with 'w' in mode, we could use a buffered IO detecting that
+ the content is modified and invalidating the attachment cache...
+ """
+ self.ensure_one()
+ if self._is_file_from_a_store(self.store_fname):
+ fs, fname = self.get_fs_and_path(self.store_fname)
+ return fs.open(
+ fname,
+ mode=mode,
+ block_size=block_size,
+ cache_options=cache_options,
+ compression=compression,
+ **kwargs,
+ )
+ if self.store_fname:
+ return fsspec.filesystem("file").open(
+ self._full_path(self.store_fname),
+ mode=mode,
+ block_size=block_size,
+ cache_options=cache_options,
+ compression=compression,
+ **kwargs,
+ )
+ if "w" in mode:
+ raise SystemError("Write mode is not supported for data read from database")
+ return io.BytesIO(self.db_datas)
+
@contextmanager
def do_in_new_env(self, new_cr=False):
"""Context manager that yields a new environment
@@ -395,7 +471,8 @@ def _force_storage_to_object_storage(self, new_cr=False):
# is required! It's because of an override of _search in ir.attachment
# which adds ('res_field', '=', False) when the domain does not
# contain 'res_field'.
- # https://github.com/odoo/odoo/blob/9032617120138848c63b3cfa5d1913c5e5ad76db/odoo/addons/base/ir/ir_attachment.py#L344-L347
+ # https://github.com/odoo/odoo/blob/9032617120138848c63b3cfa5d1913c5e5ad76db/
+ # odoo/addons/base/ir/ir_attachment.py#L344-L347
domain = [
"!",
("store_fname", "=like", "{}://%".format(storage)),
@@ -449,4 +526,4 @@ def _force_storage_to_object_storage(self, new_cr=False):
def _get_stores(self):
"""To get the list of stores activated in the system"""
- return []
+ return self.env["fs.storage"].sudo().get_storage_codes()
diff --git a/fs_attachment/models/strtobool.py b/fs_attachment/models/strtobool.py
new file mode 100644
index 0000000000..b1a849f283
--- /dev/null
+++ b/fs_attachment/models/strtobool.py
@@ -0,0 +1,21 @@
+_MAP = {
+ "y": True,
+ "yes": True,
+ "t": True,
+ "true": True,
+ "on": True,
+ "1": True,
+ "n": False,
+ "no": False,
+ "f": False,
+ "false": False,
+ "off": False,
+ "0": False,
+}
+
+
+def strtobool(value):
+ try:
+ return _MAP[str(value).lower()]
+ except KeyError as e:
+ raise ValueError('"{}" is not a valid bool value'.format(value)) from e
diff --git a/fs_attachment/readme/CONTRIBUTORS.rst b/fs_attachment/readme/CONTRIBUTORS.rst
new file mode 100644
index 0000000000..b28dec9346
--- /dev/null
+++ b/fs_attachment/readme/CONTRIBUTORS.rst
@@ -0,0 +1,11 @@
+Thierry Ducrest
+Guewen Baconnier
+Julien Coux
+Akim Juillerat
+Thomas Nowicki
+Vincent Renaville
+Denis Leemann
+Patrick Tombez
+Don Kendall
+Stephane Mangin
+Laurent Mignon
diff --git a/fs_attachment/readme/DESCRIPTION.rst b/fs_attachment/readme/DESCRIPTION.rst
new file mode 100644
index 0000000000..010c473f8b
--- /dev/null
+++ b/fs_attachment/readme/DESCRIPTION.rst
@@ -0,0 +1 @@
+This is a base addon that regroups common code used by addons targeting a specific object store.
diff --git a/fs_attachment/readme/USAGE.rst b/fs_attachment/readme/USAGE.rst
new file mode 100644
index 0000000000..a7477c2fdd
--- /dev/null
+++ b/fs_attachment/readme/USAGE.rst
@@ -0,0 +1,41 @@
+Configuration
+-------------
+
+Object storage may be slow, and for this reason, we want to store
+some files in the database anyway.
+
+Small images (128, 256) are used in Odoo in list / kanban views. We
+want them to be fast to read.
+They are generally < 50KB (default configuration) so they don't take
+that much space in database, but they'll be read much faster than from
+the object storage.
+
+The assets (application/javascript, text/css) are stored in database
+as well whatever their size is:
+
+* a database doesn't have thousands of them
+* of course better for performance
+* better portability of a database: when replicating a production
+ instance for dev, the assets are included
+
+This storage configuration can be modified in the system parameter
+``ir_attachment.storage.force.database``, as a JSON value, for instance::
+
+ {"image/": 51200, "application/javascript": 0, "text/css": 0}
+
+Where the key is the beginning of the mimetype to configure and the
+value is the limit in size below which attachments are kept in DB.
+0 means no limit.
+
+Default configuration means:
+
+* images mimetypes (image/png, image/jpeg, ...) below 50KB are
+ stored in database
+* application/javascript are stored in database whatever their size
+* text/css are stored in database whatever their size
+
+Disable attachment storage I/O
+------------------------------
+
+Define an environment variable `DISABLE_ATTACHMENT_STORAGE` set to `1`.
+This will prevent any kind of exception and any read/write on storage attachments.
diff --git a/fs_attachment/tests/__init__.py b/fs_attachment/tests/__init__.py
new file mode 100644
index 0000000000..df1a1d2e3b
--- /dev/null
+++ b/fs_attachment/tests/__init__.py
@@ -0,0 +1 @@
+from . import test_fs_attachment
diff --git a/fs_attachment/tests/test_fs_attachment.py b/fs_attachment/tests/test_fs_attachment.py
new file mode 100644
index 0000000000..758e4ec5d7
--- /dev/null
+++ b/fs_attachment/tests/test_fs_attachment.py
@@ -0,0 +1,88 @@
+# Copyright 2023 ACSONE SA/NV (http://acsone.eu).
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
+import os
+import shutil
+import tempfile
+
+from odoo.tests.common import TransactionCase
+
+
+class TestFSAttachment(TransactionCase):
+ @classmethod
+ def setUpClass(cls):
+ super().setUpClass()
+ cls.env = cls.env(context=dict(cls.env.context, tracking_disable=True))
+ cls.backend = cls.env.ref("fs_storage.default_fs_storage")
+ temp_dir = tempfile.mkdtemp()
+ cls.temp_backend = cls.env["fs.storage"].create(
+ {
+ "name": "Temp FS Storage",
+ "protocol": "file",
+ "code": "tmp_dir",
+ "directory_path": temp_dir,
+ }
+ )
+ cls.temp_dir = temp_dir
+
+ @cls.addClassCleanup
+ def cleanup_tempdir():
+ shutil.rmtree(temp_dir)
+
+ def test_create_attachment_explicit_location(self):
+ content = b"This is a test attachment"
+ attachment = (
+ self.env["ir.attachment"]
+ .with_context(
+ storage_location=self.temp_backend.code,
+ storage_file_path="test.txt",
+ )
+ .create({"name": "Test Attachment", "raw": content})
+ )
+ self.env.flush_all()
+ self.assertEqual(os.listdir(self.temp_dir), ["test.txt"])
+ self.assertEqual(attachment.raw, content)
+ self.assertFalse(attachment.db_datas)
+ with attachment.open("rb") as f:
+ self.assertEqual(f.read(), content)
+
+ with attachment.open("wb") as f:
+ f.write(b"new")
+ # refresh is required while we don't use a file-like object proxy
+ # that detect the modification of the content and invalidate the
+ # record's cache
+ attachment.refresh()
+ self.assertEqual(attachment.raw, b"new")
+
+ def test_open_attachment_in_db(self):
+ self.env["ir.config_parameter"].sudo().set_param("ir_attachment.location", "db")
+ content = b"This is a test attachment in db"
+ attachment = self.env["ir.attachment"].create(
+ {"name": "Test Attachment", "raw": content}
+ )
+ self.assertFalse(attachment.store_fname)
+ self.assertTrue(attachment.db_datas)
+ with attachment.open("rb") as f:
+ self.assertEqual(f.read(), content)
+ with self.assertRaisesRegex(SystemError, "Write mode is not supported"):
+ attachment.open("wb")
+
+ def test_attachment_open_in_filestore(self):
+ self.env["ir.config_parameter"].sudo().set_param(
+ "ir_attachment.location", "file"
+ )
+ content = b"This is a test attachment in filestore"
+ attachment = self.env["ir.attachment"].create(
+ {"name": "Test Attachment", "raw": content}
+ )
+ self.assertTrue(attachment.store_fname)
+ self.assertFalse(attachment.db_datas)
+ self.assertEqual(attachment.raw, content)
+ with attachment.open("rb") as f:
+ self.assertEqual(f.read(), content)
+ with attachment.open("wb") as f:
+ f.write(b"new")
+ # refresh is required while we don't use a file-like object proxy
+ # that detect the modification of the content and invalidate the
+ # record's cache
+ attachment.refresh()
+ self.assertEqual(attachment.raw, b"new")
diff --git a/setup/fs_attachment/odoo/addons/fs_attachment b/setup/fs_attachment/odoo/addons/fs_attachment
new file mode 120000
index 0000000000..9d55342885
--- /dev/null
+++ b/setup/fs_attachment/odoo/addons/fs_attachment
@@ -0,0 +1 @@
+../../../../fs_attachment
\ No newline at end of file
diff --git a/setup/fs_attachment/setup.py b/setup/fs_attachment/setup.py
new file mode 100644
index 0000000000..28c57bb640
--- /dev/null
+++ b/setup/fs_attachment/setup.py
@@ -0,0 +1,6 @@
+import setuptools
+
+setuptools.setup(
+ setup_requires=['setuptools-odoo'],
+ odoo_addon=True,
+)
diff --git a/test-requirements.txt b/test-requirements.txt
index 932a8957f7..fa87ce7ba6 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1 +1,2 @@
mock
+odoo-addon-fs-storage @ git+https://github.com/OCA/storage.git@refs/pull/252/head#subdirectory=setup/fs_storage
From e7036bdc11d7f496d6187b9ae084424d3499d636 Mon Sep 17 00:00:00 2001
From: "Laurent Mignon (ACSONE)"
Date: Thu, 13 Apr 2023 17:38:52 +0200
Subject: [PATCH 32/47] fixup! [ADD] fs_attachment: Store attachment through
fsspec
---
fs_attachment/__manifest__.py | 7 +-
fs_attachment/fs_stream.py | 67 +++
fs_attachment/models/__init__.py | 3 +
fs_attachment/models/fs_file_gc.py | 167 ++++++++
fs_attachment/models/fs_storage.py | 87 ++++
fs_attachment/models/ir_attachment.py | 392 ++++++++++++++----
fs_attachment/models/ir_binary.py | 41 ++
fs_attachment/readme/DESCRIPTION.rst | 18 +-
fs_attachment/security/fs_file_gc.xml | 16 +
fs_attachment/tests/__init__.py | 1 +
fs_attachment/tests/test_fs_attachment.py | 191 ++++++++-
.../tests/test_fs_attachment_internal_url.py | 74 ++++
fs_attachment/views/fs_storage.xml | 19 +
13 files changed, 1000 insertions(+), 83 deletions(-)
create mode 100644 fs_attachment/fs_stream.py
create mode 100644 fs_attachment/models/fs_file_gc.py
create mode 100644 fs_attachment/models/fs_storage.py
create mode 100644 fs_attachment/models/ir_binary.py
create mode 100644 fs_attachment/security/fs_file_gc.xml
create mode 100644 fs_attachment/tests/test_fs_attachment_internal_url.py
create mode 100644 fs_attachment/views/fs_storage.xml
diff --git a/fs_attachment/__manifest__.py b/fs_attachment/__manifest__.py
index 82a5053e56..094be911ed 100644
--- a/fs_attachment/__manifest__.py
+++ b/fs_attachment/__manifest__.py
@@ -11,7 +11,12 @@
"category": "Knowledge Management",
"depends": ["fs_storage"],
"website": "https://github.com/OCA/storage",
- "data": ["data/res_config_settings_data.xml"],
+ "data": [
+ "security/fs_file_gc.xml",
+ "views/fs_storage.xml",
+ "data/res_config_settings_data.xml",
+ ],
+ "external_dependencies": {"python": ["python_slugify"]},
"installable": True,
"auto_install": False,
}
diff --git a/fs_attachment/fs_stream.py b/fs_attachment/fs_stream.py
new file mode 100644
index 0000000000..eafe4a43a2
--- /dev/null
+++ b/fs_attachment/fs_stream.py
@@ -0,0 +1,67 @@
+# Copyright 2023 ACSONE SA/NV
+# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
+from __future__ import annotations
+
+from odoo.http import STATIC_CACHE_LONG, Response, Stream, request
+
+from .models.ir_attachment import IrAttachment
+
+try:
+ from werkzeug.utils import send_file as _send_file
+except ImportError:
+ from odoo.tools._vendor.send_file import send_file as _send_file
+
+
+class FsStream(Stream):
+ fs_attachment = None
+
+ @classmethod
+ def from_fs_attachment(cls, attachment: IrAttachment) -> FsStream:
+ attachment.ensure_one()
+ if not attachment.fs_filename:
+ raise ValueError("Attachment is not stored in a filesystem storage")
+ fs_info = attachment.fs_storage_id.root_fs.info(attachment.fs_filename)
+ return cls(
+ mimetype=attachment.mimetype,
+ download_name=attachment.name,
+ conditional=True,
+ etag=attachment.checksum,
+ type="fs",
+ size=fs_info["size"],
+ last_modified=attachment["__last_update"],
+ fs_attachment=attachment,
+ )
+
+ def read(self):
+ if self.type == "fs":
+ with self.fs_attachment.open("rb") as f:
+ return f.read()
+ return super().read()
+
+ def get_response(self, as_attachment=None, immutable=None, **send_file_kwargs):
+ if self.type != "fs":
+ return super().get_response(
+ as_attachment=as_attachment, immutable=immutable, **send_file_kwargs
+ )
+ if as_attachment is None:
+ as_attachment = self.as_attachment
+ if immutable is None:
+ immutable = self.immutable
+ send_file_kwargs = {
+ "mimetype": self.mimetype,
+ "as_attachment": as_attachment,
+ "download_name": self.download_name,
+ "conditional": self.conditional,
+ "etag": self.etag,
+ "last_modified": self.last_modified,
+ "max_age": STATIC_CACHE_LONG if immutable else self.max_age,
+ "environ": request.httprequest.environ,
+ "response_class": Response,
+ **send_file_kwargs,
+ }
+ # The file will be closed by werkzeug...
+ f = self.fs_attachment.open("rb")
+ res = _send_file(f, **send_file_kwargs)
+ if immutable and res.cache_control:
+ res.cache_control["immutable"] = None
+ return res
diff --git a/fs_attachment/models/__init__.py b/fs_attachment/models/__init__.py
index aaf38a167c..bfe56d2fda 100644
--- a/fs_attachment/models/__init__.py
+++ b/fs_attachment/models/__init__.py
@@ -1 +1,4 @@
+from . import fs_file_gc
+from . import fs_storage
from . import ir_attachment
+from . import ir_binary
diff --git a/fs_attachment/models/fs_file_gc.py b/fs_attachment/models/fs_file_gc.py
new file mode 100644
index 0000000000..1e211da392
--- /dev/null
+++ b/fs_attachment/models/fs_file_gc.py
@@ -0,0 +1,167 @@
+# Copyright 2023 ACSONE SA/NV
+# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
+import logging
+import threading
+from contextlib import closing, contextmanager
+
+import odoo
+from odoo import api, fields, models
+from odoo.sql_db import Cursor
+
+_logger = logging.getLogger(__name__)
+
+
+class FsFileGC(models.Model):
+
+ _name = "fs.file.gc"
+ _description = "Filesystem storage file garbage collector"
+
+ store_fname = fields.Char("Stored Filename")
+ fs_storage_code = fields.Char("Storage Code")
+
+ _sql_constraints = [
+ (
+ "store_fname_uniq",
+ "unique (store_fname)",
+ "The stored filename must be unique!",
+ ),
+ ]
+
+ def _is_test_mode(self) -> bool:
+ """Return True if we are running the tests, so we do not mark files for
+ garbage collection into a separate transaction.
+ """
+ return (
+ getattr(threading.current_thread(), "testing", False)
+ or self.env.registry.in_test_mode()
+ )
+
+ @contextmanager
+ def _in_new_cursor(self) -> Cursor:
+ """Context manager to execute code in a new cursor"""
+ if self._is_test_mode():
+ yield self.env.cr
+ return
+
+ registry = odoo.modules.registry.Registry.new(self.env.cr.dbname)
+ with closing(registry.cursor()) as cr:
+ try:
+ yield cr
+ except Exception:
+ cr.rollback()
+ raise
+ else:
+ # disable pylint error because this is a valid commit,
+ # we are in a new env
+ cr.commit() # pylint: disable=invalid-commit
+
+ @api.model
+ def _mark_for_gc(self, store_fname: str) -> None:
+ """Mark a file for garbage collection"
+
+ This process is done in a separate transaction since the data must be
+ preserved even if the transaction is rolled back.
+ """
+ with self._in_new_cursor() as cr:
+ code = store_fname.partition("://")[0]
+ # use plain SQL to avoid the ORM ignore conflicts errors
+ cr.execute(
+ """
+ INSERT INTO
+ fs_file_gc (
+ store_fname,
+ fs_storage_code,
+ create_date,
+ write_date,
+ create_uid,
+ write_uid
+ )
+ VALUES (
+ %s,
+ %s,
+ now() at time zone 'UTC',
+ now() at time zone 'UTC',
+ %s,
+ %s
+ )
+ ON CONFLICT DO NOTHING
+ """,
+ (store_fname, code, self.env.uid, self.env.uid),
+ )
+
+ @api.autovacuum
+ def _gc_files(self) -> None:
+ """Garbage collect files"""
+ # This method is mainly a copy of the method _gc_file_store_unsafe()
+ # of ir.attachment. The only difference is that the list
+ # of files to delete is retrieved from the table fs_file_gc instead
+ # of the odoo filestore.
+
+ # Continue in a new transaction. The LOCK statement below must be the
+ # first one in the current transaction, otherwise the database snapshot
+ # used by it may not contain the most recent changes made to the table
+ # ir_attachment! Indeed, if concurrent transactions create attachments,
+ # the LOCK statement will wait until those concurrent transactions end.
+ # But this transaction will not see the new attachments if it has done
+ # other requests before the LOCK (like the method _storage() above).
+ cr = self._cr
+ cr.commit() # pylint: disable=invalid-commit
+
+ # prevent all concurrent updates on ir_attachment and fs_file_gc
+ # while collecting, but only attempt to grab the lock for a little bit,
+ # otherwise it'd start blocking other transactions.
+ # (will be retried later anyway)
+ cr.execute("SET LOCAL lock_timeout TO '10s'")
+ cr.execute("LOCK fs_file_gc IN SHARE MODE")
+ cr.execute("LOCK ir_attachment IN SHARE MODE")
+
+ self._gc_files_unsafe()
+
+ # commit to release the lock
+ cr.commit() # pylint: disable=invalid-commit
+
+ def _gc_files_unsafe(self) -> None:
+ # get the list of fs.storage codes that must be autovacuumed
+ codes = (
+ self.env["fs.storage"].search([("autovacuum_gc", "=", True)]).mapped("code")
+ )
+ # we process by batch of storage codes.
+ self._cr.execute(
+ """
+ SELECT
+ fs_storage_code,
+ array_agg(store_fname)
+
+ FROM
+ fs_file_gc
+ WHERE
+ fs_storage_code IN %s
+ AND NOT EXISTS (
+ SELECT 1
+ FROM ir_attachment
+ WHERE store_fname = fs_file_gc.store_fname
+ )
+ GROUP BY
+ fs_storage_code
+ """,
+ (tuple(codes),),
+ )
+ for code, store_fnames in self._cr.fetchall():
+ fs = self.env["fs.storage"].get_fs_by_code(code, root=True)
+ for store_fname in store_fnames:
+ try:
+ file_path = store_fname.partition("://")[2]
+ fs.rm(file_path)
+ except Exception:
+ _logger.info("Failed to remove file %s", store_fname)
+
+ # delete the records from the table fs_file_gc
+ self._cr.execute(
+ """
+ DELETE FROM
+ fs_file_gc
+ WHERE
+ fs_storage_code IN %s
+ """,
+ (tuple(codes),),
+ )
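+
+
+# A minimal usage sketch of the garbage collection flow implemented above
+# (the storage code "tmp_dir" and the file path are only illustrative values):
+#
+#   gc_model = env["fs.file.gc"]
+#   # mark a file for deletion, in its own transaction
+#   gc_model._mark_for_gc("tmp_dir://ab/cd/abcdef0123.txt")
+#   # the autovacuum later calls _gc_files(); it removes the file only if no
+#   # ir.attachment references it anymore and autovacuum_gc is enabled on the
+#   # corresponding fs.storage
+#   gc_model._gc_files()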
diff --git a/fs_attachment/models/fs_storage.py b/fs_attachment/models/fs_storage.py
new file mode 100644
index 0000000000..617aa1f94c
--- /dev/null
+++ b/fs_attachment/models/fs_storage.py
@@ -0,0 +1,87 @@
+# Copyright 2023 ACSONE SA/NV
+# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
+
+from odoo import api, fields, models, tools
+
+from .ir_attachment import IrAttachment
+
+
+class FsStorage(models.Model):
+
+ _inherit = "fs.storage"
+
+ optimizes_directory_path = fields.Boolean(
+ help="If checked, the directory path will be optimized to avoid "
+ "too much files into the same directory. This options is used when the "
+ "storage is used to store attachments. Depending on the storage, this "
+ "option can be ignored. It's useful for storage based on real file. "
+ "This way, files with similar properties will be stored in the same "
+ "directory, avoiding overcrowding in the root directory and optimizing "
+ "access times."
+ )
+
+ autovacuum_gc = fields.Boolean(
+ string="Autovacuum Garbage Collection",
+ default=True,
+ help="If checked, the autovacuum of the garbage collection will be "
+ "automatically executed when the storage is used to store attachments. "
+ "Sometime, the autovacuum is to avoid when files in the storage are referenced "
+ "by other systems (like a website). In such case, records in the fs.file.gc "
+ "table must be manually processed.",
+ )
+ base_url = fields.Char(default="")
+ is_directory_path_in_url = fields.Boolean(
+ default=False,
+ help="Normally the directory_path is for internal usage. "
+ "If this flag is enabled the path will be used to compute the "
+ "public URL.",
+ )
+ base_url_for_files = fields.Char(compute="_compute_base_url_for_files", store=True)
+ backend_view_use_internal_url = fields.Boolean(
+ help="Decide if Odoo backend views should use the external URL (usually a CDN) "
+ "or the internal url with direct access to the storage. "
+ "This could save you some money if you pay by CDN traffic."
+ )
+
+ @api.model
+ @tools.ormcache("code")
+ def _must_optimize_directory_path(self, code):
+ return bool(
+ self.search([("code", "=", code), ("optimizes_directory_path", "=", True)])
+ )
+
+ @api.model
+ @tools.ormcache("code")
+ def _must_autovacuum_gc(self, code):
+ return bool(self.search([("code", "=", code), ("autovacuum_gc", "=", True)]))
+
+ @api.depends("base_url", "is_directory_path_in_url")
+ def _compute_base_url_for_files(self):
+ for rec in self:
+ if not rec.base_url:
+ rec.base_url_for_files = ""
+ continue
+ parts = [rec.base_url]
+ if rec.is_directory_path_in_url:
+ parts.append(rec.directory_path)
+ rec.base_url_for_files = "/".join(parts)
+
+ @api.model
+ def _get_url_for_attachment(
+ self, attachment: IrAttachment, exclude_base_url: bool = False
+ ) -> str | None:
+ """Return the URL to access the attachment
+
+ :param attachment: an attachment record
+ :return: the URL to access the attachment
+ """
+ fs_storage = self.get_by_code(attachment.fs_storage_code)
+ if not fs_storage:
+ return None
+ base_url = fs_storage.base_url_for_files
+ if not base_url:
+ return None
+ if not exclude_base_url:
+ base_url = base_url.replace(base_url, "") or "/"
+ parts = [base_url, attachment.fs_filename]
+ return "/".join([x.rstrip("/") for x in parts if x])
diff --git a/fs_attachment/models/ir_attachment.py b/fs_attachment/models/ir_attachment.py
index e82a4fab8b..19b3e4030d 100644
--- a/fs_attachment/models/ir_attachment.py
+++ b/fs_attachment/models/ir_attachment.py
@@ -4,15 +4,18 @@
import io
import logging
+import mimetypes
import os
+import re
import time
from contextlib import closing, contextmanager
import fsspec # pylint: disable=missing-manifest-dependency
import psycopg2
+from slugify import slugify # pylint: disable=missing-manifest-dependency
import odoo
-from odoo import _, api, exceptions, models
+from odoo import _, api, exceptions, fields, models
from odoo.osv.expression import AND, OR, normalize_domain
from odoo.tools.safe_eval import const_eval
@@ -21,6 +24,9 @@
_logger = logging.getLogger(__name__)
+REGEX_SLUGIFY = r"[^-a-z0-9_]+"
+
+
def is_true(strval):
return bool(strtobool(strval or "0"))
@@ -45,8 +51,82 @@ def clean_fs(files):
class IrAttachment(models.Model):
_inherit = "ir.attachment"
+ fs_filename = fields.Char(
+ "File Name into the filesystem storage",
+ help="The name of the file in the filesystem storage."
+ "To preserve the mimetype and the meaning of the filename"
+ "the filename is computed from the name and the extension",
+ readonly=True,
+ )
+
+ internal_url = fields.Char(
+ "Internal URL",
+ compute="_compute_internal_url",
+ help="The URL to access the file from the server.",
+ )
+
+ fs_url = fields.Char(
+ "Filesystem URL",
+ compute="_compute_fs_url",
+ help="The URL to access the file from the filesystem storage.",
+ store=True,
+ )
+ fs_url_path = fields.Char(
+ "Filesystem URL Path",
+ compute="_compute_fs_url_path",
+ help="The path to access the file from the filesystem storage.",
+ )
+ fs_storage_code = fields.Char(
+ "Filesystem Storage Code",
+ related="fs_storage_id.code",
+ store=True,
+ )
+ fs_storage_id = fields.Many2one(
+ "fs.storage",
+ "Filesystem Storage",
+ compute="_compute_fs_storage_id",
+ help="The storage where the file is stored.",
+ store=True,
+ ondelete="restrict",
+ )
+
+ @api.depends("name")
+ def _compute_internal_url(self) -> None:
+ for rec in self:
+ filename, extension = os.path.splitext(rec.name)
+ if not extension:
+ extension = mimetypes.guess_extension(rec.mimetype)
+ rec.internal_url = f"/web/content/{rec.id}/{filename}{extension}"
+
+ @api.depends("fs_filename")
+ def _compute_fs_url(self) -> None:
+ for rec in self:
+ rec.fs_url = None
+ if rec.fs_filename:
+ rec.fs_url = self.env["fs.storage"]._get_url_for_attachment(rec)
+
+ @api.depends("fs_filename")
+ def _compute_fs_url_path(self) -> None:
+ for rec in self:
+ rec.fs_url_path = None
+ if rec.fs_filename:
+ rec.fs_url_path = self.env["fs.storage"]._get_url_for_attachment(
+ rec, exclude_base_url=True
+ )
+
+ @api.depends("fs_filename")
+ def _compute_fs_storage_id(self):
+ for rec in self:
+ if rec.store_fname:
+ code = rec.store_fname.partition("://")[0]
+ fs_storage = self.env["fs.storage"].get_by_code(code)
+ if fs_storage != rec.fs_storage_id:
+ rec.fs_storage_id = fs_storage
+ elif rec.fs_storage_id:
+ rec.fs_storage_id = None
+
@staticmethod
- def is_storage_disabled(storage=None, log=True):
+ def _is_storage_disabled(storage=None, log=True):
msg = _("Storages are disabled (see environment configuration).")
if storage:
msg = _("Storage '%s' is disabled (see environment configuration).") % (
@@ -146,7 +226,7 @@ def _store_in_db_instead_of_object_storage(self, data, mimetype):
``_store_in_db_instead_of_object_storage_domain``.
"""
- if self.is_storage_disabled():
+ if self._is_storage_disabled():
return True
storage_config = self._get_storage_force_db_config()
for mimetype_key, limit in storage_config.items():
@@ -159,7 +239,7 @@ def _store_in_db_instead_of_object_storage(self, data, mimetype):
def _get_datas_related_values(self, data, mimetype):
storage = self.env.context.get("storage_location") or self._storage()
- if data and storage in self._get_stores():
+ if data and storage in self._get_storage_codes():
if self._store_in_db_instead_of_object_storage(data, mimetype):
# compute the fields that depend on datas
bin_data = data
@@ -173,96 +253,253 @@ def _get_datas_related_values(self, data, mimetype):
return values
return super()._get_datas_related_values(data, mimetype)
- # Odoo methods that we override to use the object storage
+ ###########################################################
+ # Odoo methods that we override to use the object storage #
+ ###########################################################
+
+ @api.model_create_multi
+ def create(self, vals_list):
+ attachments = super().create(vals_list)
+ attachments._enforce_meaningful_storage_filename()
+ return attachments
+
@api.model
def _file_read(self, fname):
- if self._is_file_from_a_store(fname):
- return self._store_file_read(fname)
+ if self._is_file_from_a_storage(fname):
+ return self._storage_file_read(fname)
else:
return super()._file_read(fname)
@api.model
def _file_write(self, bin_data, checksum):
location = self.env.context.get("storage_location") or self._storage()
- if location in self._get_stores():
- key = self.env.context.get("storage_file_path") or self.env.context.get(
- "force_storage_key"
- )
- if not key:
- key = self._compute_checksum(bin_data)
- filename = self._store_file_write(key, bin_data)
+ if location in self._get_storage_codes():
+ filename = self._storage_file_write(bin_data)
else:
filename = super()._file_write(bin_data, checksum)
return filename
@api.model
def _file_delete(self, fname) -> None: # pylint: disable=missing-return
- if self._is_file_from_a_store(fname):
+ if self._is_file_from_a_storage(fname):
cr = self.env.cr
# using SQL to include files hidden through unlink or due to record
# rules
cr.execute(
- "SELECT COUNT(*) FROM ir_attachment " "WHERE store_fname = %s", (fname,)
+ "SELECT COUNT(*) FROM ir_attachment WHERE store_fname = %s", (fname,)
)
count = cr.fetchone()[0]
if not count:
- self._store_file_delete(fname)
+ self._storage_file_delete(fname)
else:
super()._file_delete(fname)
- # Internal methods to use the object storage
- @api.model
- def get_fs_storage_for_code(self, code) -> fsspec.AbstractFileSystem | None:
- """Return the filesystem for the given storage code"""
- fs = self.env["fs.storage"].get_fs_by_code(code)
- if not fs:
- raise SystemError(f"No Filesystem storage for code {code}")
- return fs
+ def _set_attachment_data(self, asbytes) -> None: # pylint: disable=missing-return
+ super()._set_attachment_data(asbytes)
+ self._enforce_meaningful_storage_filename()
+ ##############################################
+ # Internal methods to use the object storage #
+ ##############################################
@api.model
- def get_fs_and_path(self, fname) -> tuple[fsspec.AbstractFileSystem, str]:
- """Return the filesystem and the path for the given fname"""
- partition = fname.partition("://")
- storage = partition[0]
- fs = self.get_fs_storage_for_code(storage)
- fname = partition[2]
- return fs, fname
-
- @api.model
- def _store_file_read(self, fname: str) -> bytes | None:
+ def _storage_file_read(self, fname: str) -> bytes | None:
"""Read the file from the filesystem storage"""
- fs, fname = self.get_fs_and_path(fname)
+ fs, _storage, fname = self._fs_parse_store_fname(fname, root=True)
with fs.open(fname, "rb") as fs:
return fs.read()
@api.model
- def _store_file_write(self, path, bin_data: bytes) -> str:
+ def _storage_file_write(self, bin_data: bytes) -> str:
"""Write the file to the filesystem storage"""
storage = self.env.context.get("storage_location") or self._storage()
- fs = self.get_fs_storage_for_code(storage)
+ fs = self._get_fs_storage_for_code(storage)
+ path = self._get_fs_path(storage, bin_data)
+ dirname = os.path.dirname(path)
+ if not fs.exists(dirname):
+ fs.makedirs(dirname)
fname = f"{storage}://{path}"
with fs.open(path, "wb") as fs:
fs.write(bin_data)
+ self._fs_mark_for_gc(fname)
return fname
@api.model
- def _store_file_delete(self, fname):
+ def _storage_file_delete(self, fname):
"""Delete the file from the filesystem storage"""
- fs, fname = self.get_fs_and_path(fname)
- fs.rm(fname)
+ self._fs_mark_for_gc(fname)
+
+ @api.model
+ def _get_fs_path(self, storage_code: str, bin_data: bytes) -> str:
+ """Compute the path to store the file in the filesystem storage"""
+ key = self.env.context.get("force_storage_key")
+ if not key:
+ key = self._compute_checksum(bin_data)
+ if self.env["fs.storage"]._must_optimize_directory_path(storage_code):
+ # Generate a unique directory path based on the file's hash
+ key = os.path.join(key[:2], key[2:4], key)
+ return key
+
+ def _build_fs_filename(self):
+ """Build the filename to store in the filesystem storage
+
+ The filename is computed from the name, the extension and a version
+ number. The version number is incremented each time we build a new
+ filename. To know if a filename has already been built, we check if
+ the fs_filename field is set. If it is set, we increment the version
+ number. The version number is taken from the computed filename.
+
+ The format of the filename is:
+ <name>-<id>-<version>.<extension> (for example, report-42-1.pdf)
+ """
+ self.ensure_one()
+ filename, extension = os.path.splitext(self.name)
+ if not extension:
+ extension = mimetypes.guess_extension(self.mimetype)
+ version = 0
+ if self.fs_filename:
+ version = self._parse_fs_filename(self.fs_filename)[2] + 1
+ return "{}{}".format(
+ slugify(
+ "{}-{}-{}".format(filename, self.id, version),
+ regex_pattern=REGEX_SLUGIFY,
+ ),
+ extension,
+ )
+
+ def _enforce_meaningful_storage_filename(self) -> None:
+ """Enforce meaningful filename for files stored in the filesystem storage
+
+ The filename of the file in the filesystem storage is computed from
+ the mimetype and the name of the attachment. This method is called
+ when an attachment is created to ensure that the filename of the file
+ in the filesystem keeps the same meaning as the name of the attachment.
+
+ Keeping the same meaning and mimetype is important because it also makes
+ it easier to provide a meaningful and SEO-friendly URL to the file in the
+ filesystem storage.
+ """
+ for attachment in self:
+ if not self._is_file_from_a_storage(attachment.store_fname):
+ continue
+ fs, storage, filename = self._fs_parse_store_fname(attachment.store_fname)
+ if self._is_fs_filename_meaningful(filename):
+ continue
+ new_filename = attachment._build_fs_filename()
+ # we must keep the same full path as the original filename
+ new_filename = os.path.join(os.path.dirname(filename), new_filename)
+ fs.rename(filename, new_filename)
+ new_filename = fs.info(new_filename)["name"]
+ attachment.fs_filename = new_filename
+ # we need to update the store_fname with the new filename by
+ # calling the write method of the field since the write method
+ # of ir_attachment prevent normal write on store_fname
+ attachment._fields["store_fname"].write(
+ attachment, f"{storage}://{new_filename}"
+ )
+ self._fs_mark_for_gc(attachment.store_fname)
+
+ @api.model
+ def _get_fs_storage_for_code(
+ self, code: str, root: bool = False
+ ) -> fsspec.AbstractFileSystem | None:
+ """Return the filesystem for the given storage code"""
+ fs = self.env["fs.storage"].get_fs_by_code(code, root=root)
+ if not fs:
+ raise SystemError(f"No Filesystem storage for code {code}")
+ return fs
+
+ @api.model
+ def _fs_parse_store_fname(
+ self, fname: str, root: bool = False
+ ) -> tuple[fsspec.AbstractFileSystem, str, str]:
+ """Return the filesystem, the storage code and the path for the given fname
+
+ :param fname: the fname to parse
+ :param root: if True, return the root filesystem
+ """
+ partition = fname.partition("://")
+ storage_code = partition[0]
+ fs = self._get_fs_storage_for_code(storage_code, root=root)
+ fname = partition[2]
+ return fs, storage_code, fname
+
+ @api.model
+ def _is_fs_filename_meaningful(self, filename: str) -> bool:
+ """Return True if the filename is meaningful
+ A filename is meaningful if it's formatted as <name>-<id>-<version>.<extension>
+ """
+ if not filename:
+ return False
+ filename = os.path.basename(filename)
+ re_fs_filename_parser = re.compile(
+ r"^(?P.+)-(?P\d+)-(?P\d+)(?P\..+)$"
+ )
+ match = re_fs_filename_parser.match(filename)
+ if not match:
+ return False
+ name, res_id, version, extension = match.groups()
+ return bool(name and res_id and version and extension)
@api.model
- def _is_file_from_a_store(self, fname):
+ def _parse_fs_filename(self, filename: str) -> tuple[str, int, int, str] | None:
+ """Parse the filename and return the name, id, version and extension
+ The expected format is <name>-<id>-<version>.<extension>
+ """
+ if not filename:
+ return "", "", "", ""
+ filename = os.path.basename(filename)
+ re_fs_filename_parser = re.compile(
+ r"^(?P.+)-(?P\d+)-(?P\d+)(?P\..+)$"
+ )
+ match = re_fs_filename_parser.match(filename)
+ if not match:
+ return None
+ name, res_id, version, extension = match.groups()
+ return name, int(res_id), int(version), extension
+
+ @api.model
+ def _is_file_from_a_storage(self, fname):
if not fname:
return False
- for store_name in self._get_stores():
- if self.is_storage_disabled(store_name):
+ for storage_code in self._get_storage_codes():
+ if self._is_storage_disabled(storage_code):
continue
- uri = "{}://".format(store_name)
+ uri = "{}://".format(storage_code)
if fname.startswith(uri):
return True
return False
+ @api.model
+ def _fs_mark_for_gc(self, fname):
+ """Mark the file for deletion
+
+ The file will be deleted by the garbage collector if it's no longer
+ referenced by any attachment. We use a garbage collector to preserve
+ the transactional semantics between Odoo and the filesystem storage.
+ Files are added to the garbage collector when:
+ - each time a file is created in the filesystem storage
+ - an attachment is deleted
+
+ Whatever the result of the current transaction, the information of files
+ marked for deletion is stored in the database.
+
+ When the garbage collector is called, it will check if the file is still
+ referenced by an attachment. If not, the file is physically deleted from
+ the filesystem storage.
+
+ If the creation of the attachment fails, since the file is marked for
+ deletion when it's written into the filesystem storage, it will be
+ deleted by the garbage collector.
+
+ If the content of the attachment is updated, we always create a new file.
+ This new file is marked for deletion, and so is the old one. If the transaction
+ succeeds, the old file is deleted by the garbage collector since it's no
+ longer referenced by any attachment. If the transaction fails, the old file
+ is not deleted since it's still referenced by the attachment, but the new
+ file is deleted since it's marked for deletion and not referenced.
+ """
+ self.env["fs.file.gc"]._mark_for_gc(fname)
+
def open(
self, mode="rb", block_size=None, cache_options=None, compression=None, **kwargs
) -> io.IOBase:
@@ -307,8 +544,10 @@ def open(
the content is modified and invalidating the attachment cache...
"""
self.ensure_one()
- if self._is_file_from_a_store(self.store_fname):
- fs, fname = self.get_fs_and_path(self.store_fname)
+ if self._is_file_from_a_storage(self.store_fname):
+ fs, _storage, fname = self._fs_parse_store_fname(
+ self.store_fname, root=True
+ )
return fs.open(
fname,
mode=mode,
@@ -331,34 +570,41 @@ def open(
return io.BytesIO(self.db_datas)
@contextmanager
- def do_in_new_env(self, new_cr=False):
+ def _do_in_new_env(self, new_cr=False):
"""Context manager that yields a new environment
Using a new Odoo Environment thus a new PG transaction.
"""
- with api.Environment.manage():
- if new_cr:
- registry = odoo.modules.registry.Registry.new(self.env.cr.dbname)
- with closing(registry.cursor()) as cr:
- try:
- yield self.env(cr=cr)
- except Exception:
- cr.rollback()
- raise
- else:
- # disable pylint error because this is a valid commit,
- # we are in a new env
- cr.commit() # pylint: disable=invalid-commit
- else:
- # make a copy
- yield self.env()
+ if new_cr:
+ registry = odoo.modules.registry.Registry.new(self.env.cr.dbname)
+ with closing(registry.cursor()) as cr:
+ try:
+ yield self.env(cr=cr)
+ except Exception:
+ cr.rollback()
+ raise
+ else:
+ # disable pylint error because this is a valid commit,
+ # we are in a new env
+ cr.commit() # pylint: disable=invalid-commit
+ else:
+ # make a copy
+ yield self.env()
+
+ def _get_storage_codes(self):
+ """Get the list of filesystem storage active in the system"""
+ return self.env["fs.storage"].sudo().get_storage_codes()
+
+ ################################
+ # useful methods for migration #
+ ################################
def _move_attachment_to_store(self):
self.ensure_one()
_logger.info("inspecting attachment %s (%d)", self.name, self.id)
fname = self.store_fname
storage = fname.partition("://")[0]
- if self.is_storage_disabled(storage):
+ if self._is_storage_disabled(storage):
fname = False
if fname:
# migrating from filesystem filestore
@@ -387,7 +633,7 @@ def force_storage(self):
_("Only administrators can execute this action.")
)
location = self.env.context.get("storage_location") or self._storage()
- if location not in self._get_stores():
+ if location not in self._get_storage_codes():
return super().force_storage()
self._force_storage_to_object_storage()
@@ -407,9 +653,9 @@ def force_storage_to_db_for_special_fields(self, new_cr=False):
It is not called anywhere, but can be called by RPC or scripts.
"""
storage = self._storage()
- if self.is_storage_disabled(storage):
+ if self._is_storage_disabled(storage):
return
- if storage not in self._get_stores():
+ if storage not in self._get_storage_codes():
return
domain = AND(
@@ -428,7 +674,7 @@ def force_storage_to_db_for_special_fields(self, new_cr=False):
)
)
- with self.do_in_new_env(new_cr=new_cr) as new_env:
+ with self._do_in_new_env(new_cr=new_cr) as new_env:
model_env = new_env["ir.attachment"].with_context(prefetch_fields=False)
attachment_ids = model_env.search(domain).ids
if not attachment_ids:
@@ -465,7 +711,7 @@ def force_storage_to_db_for_special_fields(self, new_cr=False):
def _force_storage_to_object_storage(self, new_cr=False):
_logger.info("migrating files to the object storage")
storage = self.env.context.get("storage_location") or self._storage()
- if self.is_storage_disabled(storage):
+ if self._is_storage_disabled(storage):
return
# The weird "res_field = False OR res_field != False" domain
# is required! It's because of an override of _search in ir.attachment
@@ -484,7 +730,7 @@ def _force_storage_to_object_storage(self, new_cr=False):
# below. We do not create a new cursor by default because it causes
# serialization issues due to concurrent updates on attachments during
# the installation
- with self.do_in_new_env(new_cr=new_cr) as new_env:
+ with self._do_in_new_env(new_cr=new_cr) as new_env:
model_env = new_env["ir.attachment"]
ids = model_env.search(domain).ids
files_to_clean = []
@@ -523,7 +769,3 @@ def _force_storage_to_object_storage(self, new_cr=False):
if files_to_clean:
new_env.cr.commit()
clean_fs(files_to_clean)
-
- def _get_stores(self):
- """To get the list of stores activated in the system"""
- return self.env["fs.storage"].sudo().get_storage_codes()
diff --git a/fs_attachment/models/ir_binary.py b/fs_attachment/models/ir_binary.py
new file mode 100644
index 0000000000..d06d5f1db0
--- /dev/null
+++ b/fs_attachment/models/ir_binary.py
@@ -0,0 +1,41 @@
+# Copyright 2023 ACSONE SA/NV
+# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
+import logging
+
+from odoo import models
+
+from ..fs_stream import FsStream
+
+_logger = logging.getLogger(__name__)
+
+
+class IrBinary(models.AbstractModel):
+
+ _inherit = "ir.binary"
+
+ def _record_to_stream(self, record, field_name):
+ # Extend base implementation to support attachment stored into a
+ # filesystem storage
+ fs_attachment = None
+ if record._name == "ir.attachment" and record.fs_filename:
+ fs_attachment = record
+ record.check_field_access_rights("read", [field_name])
+ field_def = record._fields[field_name]
+ if field_def.attachment and not field_def.compute and not field_def.related:
+ field_attachment = (
+ self.env["ir.attachment"]
+ .sudo()
+ .search(
+ domain=[
+ ("res_model", "=", record._name),
+ ("res_id", "=", record.id),
+ ("res_field", "=", field_name),
+ ],
+ limit=1,
+ )
+ )
+ if field_attachment.fs_filename:
+ fs_attachment = field_attachment
+ if fs_attachment:
+ return FsStream.from_fs_attachment(fs_attachment)
+ return super()._record_to_stream(record, field_name)
diff --git a/fs_attachment/readme/DESCRIPTION.rst b/fs_attachment/readme/DESCRIPTION.rst
index 010c473f8b..1e32edad54 100644
--- a/fs_attachment/readme/DESCRIPTION.rst
+++ b/fs_attachment/readme/DESCRIPTION.rst
@@ -1 +1,17 @@
-This is a base addon that regroup common code used by addons targeting specific object store
+In some cases, you need to store attachments in another system than Odoo's
+filestore. For example, when your deployment is based on a multi-server
+architecture to ensure redundancy and scalability, your attachments must
+be stored in a way that makes them accessible from all the servers. For that
+purpose, you can use a shared storage system like NFS, an S3-compatible
+object storage, and so on.
+
+This addon extends the storage mechanism of Odoo's attachments to allow
+you to store them in any filesystem supported by the Python
+library `fsspec <https://filesystem-spec.readthedocs.io/en/latest/>`_ and made
+available via the `fs_storage` addon.
+
+In contrast to Odoo, when a file is stored in an external storage, this
+addon ensures that the filename keeps its meaning (in Odoo, the filename
+in the filestore is the checksum of the file content). Concretely, the
+filename is based on the pattern:
+'<name>-<id>-<version>.<extension>'
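+
+For instance, a storage pointing to a local directory can be declared and used
+for an attachment as follows (a minimal sketch based on the tests of this
+addon; the code ``tmp_dir`` and the directory path are only examples)::
+
+    storage = env["fs.storage"].create(
+        {
+            "name": "Temp FS Storage",
+            "protocol": "file",
+            "code": "tmp_dir",
+            "directory_path": "/tmp/odoo-attachments",
+        }
+    )
+    attachment = (
+        env["ir.attachment"]
+        .with_context(storage_location="tmp_dir")
+        .create({"name": "test.txt", "raw": b"hello"})
+    )
+    # the file ends up stored as e.g. 'test-<attachment id>-0.txt'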
diff --git a/fs_attachment/security/fs_file_gc.xml b/fs_attachment/security/fs_file_gc.xml
new file mode 100644
index 0000000000..077c38c430
--- /dev/null
+++ b/fs_attachment/security/fs_file_gc.xml
@@ -0,0 +1,16 @@
+
+
+
+
+
+ fs.file.gc access name
+
+
+
+
+
+
+
+
+
diff --git a/fs_attachment/tests/__init__.py b/fs_attachment/tests/__init__.py
index df1a1d2e3b..21e0af46e4 100644
--- a/fs_attachment/tests/__init__.py
+++ b/fs_attachment/tests/__init__.py
@@ -1 +1,2 @@
from . import test_fs_attachment
+from . import test_fs_attachment_internal_url
diff --git a/fs_attachment/tests/test_fs_attachment.py b/fs_attachment/tests/test_fs_attachment.py
index 758e4ec5d7..c653c1d983 100644
--- a/fs_attachment/tests/test_fs_attachment.py
+++ b/fs_attachment/tests/test_fs_attachment.py
@@ -23,11 +23,18 @@ def setUpClass(cls):
}
)
cls.temp_dir = temp_dir
+ cls.gc_file_model = cls.env["fs.file.gc"]
@cls.addClassCleanup
def cleanup_tempdir():
shutil.rmtree(temp_dir)
+ def tearDown(self) -> None:
+ super().tearDown()
+ # empty the temp dir
+ for f in os.listdir(self.temp_dir):
+ os.remove(os.path.join(self.temp_dir, f))
+
def test_create_attachment_explicit_location(self):
content = b"This is a test attachment"
attachment = (
@@ -36,10 +43,10 @@ def test_create_attachment_explicit_location(self):
storage_location=self.temp_backend.code,
storage_file_path="test.txt",
)
- .create({"name": "Test Attachment", "raw": content})
+ .create({"name": "test.txt", "raw": content})
)
self.env.flush_all()
- self.assertEqual(os.listdir(self.temp_dir), ["test.txt"])
+ self.assertEqual(os.listdir(self.temp_dir), [f"test-{attachment.id}-0.txt"])
self.assertEqual(attachment.raw, content)
self.assertFalse(attachment.db_datas)
with attachment.open("rb") as f:
@@ -50,14 +57,14 @@ def test_create_attachment_explicit_location(self):
# refresh is required while we don't use a file-like object proxy
# that detect the modification of the content and invalidate the
# record's cache
- attachment.refresh()
+ attachment.invalidate_recordset()
self.assertEqual(attachment.raw, b"new")
def test_open_attachment_in_db(self):
self.env["ir.config_parameter"].sudo().set_param("ir_attachment.location", "db")
content = b"This is a test attachment in db"
attachment = self.env["ir.attachment"].create(
- {"name": "Test Attachment", "raw": content}
+ {"name": "test.txt", "raw": content}
)
self.assertFalse(attachment.store_fname)
self.assertTrue(attachment.db_datas)
@@ -72,7 +79,7 @@ def test_attachment_open_in_filestore(self):
)
content = b"This is a test attachment in filestore"
attachment = self.env["ir.attachment"].create(
- {"name": "Test Attachment", "raw": content}
+ {"name": "test.txt", "raw": content}
)
self.assertTrue(attachment.store_fname)
self.assertFalse(attachment.db_datas)
@@ -84,5 +91,177 @@ def test_attachment_open_in_filestore(self):
# refresh is required while we don't use a file-like object proxy
# that detect the modification of the content and invalidate the
# record's cache
- attachment.refresh()
+ attachment.invalidate_recordset()
+ self.assertEqual(attachment.raw, b"new")
+
+ def test_default_attachment_store_in_fs(self):
+ self.env["ir.config_parameter"].sudo().set_param(
+ "ir_attachment.location", "tmp_dir"
+ )
+ content = b"This is a test attachment in filestore tmp_dir"
+ attachment = self.env["ir.attachment"].create(
+ {"name": "test.txt", "raw": content}
+ )
+ self.assertTrue(attachment.store_fname)
+ self.assertFalse(attachment.db_datas)
+ self.assertEqual(attachment.raw, content)
+ self.env.flush_all()
+
+ initial_filename = f"test-{attachment.id}-0.txt"
+
+ self.assertEqual(os.listdir(self.temp_dir), [initial_filename])
+
+ with attachment.open("rb") as f:
+ self.assertEqual(f.read(), content)
+
+ with open(os.path.join(self.temp_dir, initial_filename), "rb") as f:
+ self.assertEqual(f.read(), content)
+
+ # update the attachment
+ attachment.raw = b"new"
+ with attachment.open("rb") as f:
+ self.assertEqual(f.read(), b"new")
+ # a new file version is created
+ new_filename = f"test-{attachment.id}-1.txt"
+ with open(os.path.join(self.temp_dir, new_filename), "rb") as f:
+ self.assertEqual(f.read(), b"new")
self.assertEqual(attachment.raw, b"new")
+ self.assertEqual(
+ attachment.store_fname, f"tmp_dir://{self.temp_dir}/{new_filename}"
+ )
+
+ # the original file is still to be deleted by the GC
+ self.assertEqual(
+ set(os.listdir(self.temp_dir)), {initial_filename, new_filename}
+ )
+
+ # run the GC
+ self.env.flush_all()
+ self.gc_file_model._gc_files_unsafe()
+ self.assertEqual(os.listdir(self.temp_dir), [new_filename])
+
+ attachment.unlink()
+ # concrete file deletion is done by the GC
+ self.env.flush_all()
+ self.assertEqual(os.listdir(self.temp_dir), [new_filename])
+ # run the GC
+ self.gc_file_model._gc_files_unsafe()
+ self.assertEqual(os.listdir(self.temp_dir), [])
+
+ def test_fs_update_transactionnal(self):
+ """In this test we check that if a rollback is done on an update
+ The original content is preserved
+ """
+ self.env["ir.config_parameter"].sudo().set_param(
+ "ir_attachment.location", "tmp_dir"
+ )
+ content = b"Transactional update"
+ attachment = self.env["ir.attachment"].create(
+ {"name": "test.txt", "raw": content}
+ )
+ self.env.flush_all()
+ self.assertEqual(attachment.raw, content)
+
+ initial_filename = f"{self.temp_dir}/test-{attachment.id}-0.txt"
+
+ self.assertEqual(attachment.store_fname, f"tmp_dir://{initial_filename}")
+ self.assertEqual(attachment.fs_filename, initial_filename)
+ self.assertEqual(
+ os.listdir(self.temp_dir), [os.path.basename(initial_filename)]
+ )
+
+ orignal_store_fname = attachment.store_fname
+ try:
+ with self.env.cr.savepoint():
+ attachment.raw = b"updated"
+ new_filename = f"{self.temp_dir}/test-{attachment.id}-1.txt"
+ new_store_fname = f"tmp_dir://{new_filename}"
+ self.assertEqual(attachment.store_fname, new_store_fname)
+ self.assertEqual(attachment.fs_filename, new_filename)
+ # at this stage the original file and the new file are present
+ # in the list of files to GC
+ gc_files = self.gc_file_model.search([]).mapped("store_fname")
+ self.assertIn(orignal_store_fname, gc_files)
+ self.assertIn(new_store_fname, gc_files)
+ raise MyException("dummy exception")
+ except MyException:
+ ...
+ attachment.invalidate_recordset()
+ self.env.flush_all()
+ self.assertEqual(attachment.store_fname, f"tmp_dir://{initial_filename}")
+ self.assertEqual(attachment.fs_filename, initial_filename)
+ self.assertEqual(attachment.raw, content)
+ self.assertEqual(
+ set(os.listdir(self.temp_dir)),
+ {os.path.basename(initial_filename), os.path.basename(new_filename)},
+ )
+ # in test mode, the gc marks are not done in a separate transaction,
+ # therefore they have been rolled back with the savepoint. We manually
+ # add our two store_fname to the list of files to GC
+ self.gc_file_model._mark_for_gc(orignal_store_fname)
+ self.gc_file_model._mark_for_gc(new_store_fname)
+ # run gc
+ self.gc_file_model._gc_files_unsafe()
+ self.assertEqual(
+ os.listdir(self.temp_dir), [os.path.basename(initial_filename)]
+ )
+
+ def test_fs_create_transactional(self):
+ """In this test we check that if a rollback is done on a create
+ The file is removed
+ """
+ self.env["ir.config_parameter"].sudo().set_param(
+ "ir_attachment.location", "tmp_dir"
+ )
+ content = b"Transactional create"
+ try:
+
+ with self.env.cr.savepoint():
+ attachment = self.env["ir.attachment"].create(
+ {"name": "test.txt", "raw": content}
+ )
+ self.env.flush_all()
+ self.assertEqual(attachment.raw, content)
+ initial_filename = f"{self.temp_dir}/test-{attachment.id}-0.txt"
+ self.assertEqual(
+ attachment.store_fname, f"tmp_dir://{initial_filename}"
+ )
+ self.assertEqual(attachment.fs_filename, initial_filename)
+ self.assertEqual(
+ os.listdir(self.temp_dir), [os.path.basename(initial_filename)]
+ )
+ new_store_fname = attachment.store_fname
+ # at this stage the new file is into the list of files to GC
+ gc_files = self.gc_file_model.search([]).mapped("store_fname")
+ self.assertIn(new_store_fname, gc_files)
+ raise MyException("dummy exception")
+ except MyException:
+ ...
+ self.env.flush_all()
+ # in test mode, the gc marks are not done in a separate transaction,
+ # therefore they have been rolled back with the savepoint. We manually
+ # add our new file to the list of files to GC
+ self.gc_file_model._mark_for_gc(new_store_fname)
+ # run gc
+ self.gc_file_model._gc_files_unsafe()
+ self.assertEqual(os.listdir(self.temp_dir), [])
+
+ def test_no_gc_if_disabled_on_storage(self):
+ store_fname = "tmp_dir://dummy-0-0.txt"
+ self.gc_file_model._mark_for_gc(store_fname)
+ self.temp_backend.autovacuum_gc = False
+ self.gc_file_model._gc_files_unsafe()
+ self.assertIn(store_fname, self.gc_file_model.search([]).mapped("store_fname"))
+ self.temp_backend.autovacuum_gc = False
+ self.gc_file_model._gc_files_unsafe()
+ self.assertIn(store_fname, self.gc_file_model.search([]).mapped("store_fname"))
+ self.temp_backend.autovacuum_gc = True
+ self.gc_file_model._gc_files_unsafe()
+ self.assertNotIn(
+ store_fname, self.gc_file_model.search([]).mapped("store_fname")
+ )
+
+
+class MyException(Exception):
+ """Exception to be raised into tests ensure that we trap only this
+ exception and not other exceptions raised by the test"""
diff --git a/fs_attachment/tests/test_fs_attachment_internal_url.py b/fs_attachment/tests/test_fs_attachment_internal_url.py
new file mode 100644
index 0000000000..61d0119e33
--- /dev/null
+++ b/fs_attachment/tests/test_fs_attachment_internal_url.py
@@ -0,0 +1,74 @@
+# Copyright 2023 ACSONE SA/NV (http://acsone.eu).
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
+import os
+import shutil
+import tempfile
+
+from odoo.tests.common import HttpCase
+
+
+class TestFsAttachmentInternalUrl(HttpCase):
+ @classmethod
+ def setUpClass(cls):
+ super().setUpClass()
+ cls.env = cls.env(context=dict(cls.env.context, tracking_disable=True))
+ cls.backend = cls.env.ref("fs_storage.default_fs_storage")
+ temp_dir = tempfile.mkdtemp()
+ cls.temp_backend = cls.env["fs.storage"].create(
+ {
+ "name": "Temp FS Storage",
+ "protocol": "file",
+ "code": "tmp_dir",
+ "directory_path": temp_dir,
+ }
+ )
+ cls.temp_dir = temp_dir
+ cls.gc_file_model = cls.env["fs.file.gc"]
+ cls.content = b"This is a test attachment"
+ cls.attachment = (
+ cls.env["ir.attachment"]
+ .with_context(
+ storage_location=cls.temp_backend.code,
+ storage_file_path="test.txt",
+ )
+ .create({"name": "test.txt", "raw": cls.content})
+ )
+
+ @cls.addClassCleanup
+ def cleanup_tempdir():
+ shutil.rmtree(temp_dir)
+
+ def tearDown(self) -> None:
+ super().tearDown()
+ # empty the temp dir
+ for f in os.listdir(self.temp_dir):
+ os.remove(os.path.join(self.temp_dir, f))
+
+ def assertDownload(
+ self, url, headers, assert_status_code, assert_headers, assert_content=None
+ ):
+ res = self.url_open(url, headers=headers)
+ res.raise_for_status()
+ self.assertEqual(res.status_code, assert_status_code)
+ for header_name, header_value in assert_headers.items():
+ self.assertEqual(
+ res.headers.get(header_name),
+ header_value,
+ f"Wrong value for header {header_name}",
+ )
+ if assert_content:
+ self.assertEqual(res.content, assert_content, "Wrong content")
+ return res
+
+ def test_fs_attachment_internal_url(self):
+ self.authenticate("admin", "admin")
+ self.assertDownload(
+ self.attachment.internal_url,
+ headers={},
+ assert_status_code=200,
+ assert_headers={
+ "Content-Type": "text/plain; charset=utf-8",
+ "Content-Disposition": "inline; filename=test.txt",
+ },
+ assert_content=self.content,
+ )
diff --git a/fs_attachment/views/fs_storage.xml b/fs_attachment/views/fs_storage.xml
new file mode 100644
index 0000000000..686e00ec2d
--- /dev/null
+++ b/fs_attachment/views/fs_storage.xml
@@ -0,0 +1,19 @@
+
+
+
+
+
+ fs.storage.form (in fs_attachment)
+ fs.storage
+
+
+
+
+
+
+
+
+
+
+
From c43e0e006297c29bf187a001137005ed8c87b57e Mon Sep 17 00:00:00 2001
From: "Laurent Mignon (ACSONE)"
Date: Fri, 14 Apr 2023 17:53:55 +0200
Subject: [PATCH 33/47] add tests and fixes for urls, allows to define the
default storage for attachment on the fs.storage itself
---
fs_attachment/models/fs_storage.py | 165 ++++++++++++++++++++--
fs_attachment/models/ir_attachment.py | 60 +++-----
fs_attachment/tests/test_fs_attachment.py | 24 +++-
3 files changed, 198 insertions(+), 51 deletions(-)
diff --git a/fs_attachment/models/fs_storage.py b/fs_attachment/models/fs_storage.py
index 617aa1f94c..fc98327fb3 100644
--- a/fs_attachment/models/fs_storage.py
+++ b/fs_attachment/models/fs_storage.py
@@ -1,7 +1,9 @@
# Copyright 2023 ACSONE SA/NV
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
-from odoo import api, fields, models, tools
+from odoo import _, api, fields, models, tools
+from odoo.exceptions import ValidationError
+from odoo.tools.safe_eval import const_eval
from .ir_attachment import IrAttachment
@@ -19,7 +21,6 @@ class FsStorage(models.Model):
"directory, avoiding overcrowding in the root directory and optimizing "
"access times."
)
-
autovacuum_gc = fields.Boolean(
string="Autovacuum Garbage Collection",
default=True,
@@ -42,6 +43,107 @@ class FsStorage(models.Model):
"or the internal url with direct access to the storage. "
"This could save you some money if you pay by CDN traffic."
)
+ use_as_default_for_attachments = fields.Boolean(
+ help="If checked, this storage will be used to store all the attachments ",
+ default=False,
+ )
+ force_db_for_default_attachment_rules = fields.Text(
+ help="When storing attachments in an external storage, storage may be slow."
+ "If the storage is used to store odoo attachments by default, this could lead "
+ "to a bad user experience since small images (128, 256) are used in Odoo "
+ "in list / kanban views. We want them to be fast to read."
+ "This field allows to force the store of some attachments in the odoo "
+ "database. The value is a dict Where the key is the beginning of the "
+ "mimetype to configure and the value is the limit in size below which "
+ "attachments are kept in DB. 0 means no limit.\n"
+ "Default configuration means:\n"
+ "* images mimetypes (image/png, image/jpeg, ...) below 50KB are stored "
+ "in database\n"
+ "* application/javascript are stored in database whatever their size \n"
+ "* text/css are stored in database whatever their size",
+ default=lambda self: self._default_force_db_for_default_attachment_rules,
+ )
+
+ _sql_constraints = [
+ (
+ "use_as_default_for_attachments_unique",
+ "unique(use_as_default_for_attachments)",
+ "Only one storage can be used as default for attachments",
+ )
+ ]
+
+ @property
+ def _default_force_db_for_default_attachment_rules(self) -> str:
+ return '{"image/": 51200, "application/javascript": 0, "text/css": 0}'
+
+ @api.onchange("use_as_default_for_attachments")
+ def _onchange_use_as_default_for_attachments(self):
+ if not self.use_as_default_for_attachments:
+ self.force_db_for_default_attachment_rules = ""
+ else:
+ self.force_db_for_default_attachment_rules = (
+ self._default_force_db_for_default_attachment_rules
+ )
+
+ @api.model_create_multi
+ def create(self, vals_list):
+ for vals in vals_list:
+ if not vals.get("use_as_default_for_attachments"):
+ vals["force_db_for_default_attachment_rules"] = ""
+ return super().create(vals_list)
+
+ def write(self, vals):
+ if not vals.get("use_as_default_for_attachments"):
+ vals["force_db_for_default_attachment_rules"] = ""
+ return super().write(vals)
+
+ @api.constrains(
+ "force_db_for_default_attachment_rules", "use_as_default_for_attachments"
+ )
+ def _check_force_db_for_default_attachment_rules(self):
+ for rec in self:
+ if not rec.force_db_for_default_attachment_rules:
+ continue
+ if not rec.use_as_default_for_attachments:
+ raise ValidationError(
+ _(
+ "The force_db_for_default_attachment_rules can only be set "
+ "if the storage is used as default for attachments."
+ )
+ )
+ try:
+ const_eval(rec.force_db_for_default_attachment_rules)
+ except (SyntaxError, TypeError, ValueError) as e:
+ raise ValidationError(
+ _(
+ "The force_db_for_default_attachment_rules is not a valid "
+ "python dict."
+ )
+ ) from e
+
+ @api.model
+ @tools.ormcache()
+ def get_default_storage_code_for_attachments(self):
+ """Return the code of the storage to use to store by default the attachments"""
+ storage = self.search([("use_as_default_for_attachments", "=", True)], limit=1)
+ if storage:
+ return storage.code
+ return None
+
+ @api.model
+ @tools.ormcache("code")
+ def get_force_db_for_default_attachment_rules(self, code):
+ """Return the rules to force the storage of some attachments in the DB
+
+ :param code: the code of the storage
+ :return: a dict where the key is the beginning of the mimetype to configure
+ and the value is the limit in size below which attachments are kept in DB.
+ 0 means no limit.
+ """
+ storage = self.search([("code", "=", code)], limit=1)
+ if storage and storage.force_db_for_default_attachment_rules:
+ return const_eval(storage.force_db_for_default_attachment_rules)
+ return {}
@api.model
@tools.ormcache("code")
@@ -62,9 +164,9 @@ def _compute_base_url_for_files(self):
rec.base_url_for_files = ""
continue
parts = [rec.base_url]
- if rec.is_directory_path_in_url:
+ if rec.is_directory_path_in_url and rec.directory_path:
parts.append(rec.directory_path)
- rec.base_url_for_files = "/".join(parts)
+ rec.base_url_for_files = self._normalize_url("/".join(parts))
@api.model
def _get_url_for_attachment(
@@ -81,7 +183,54 @@ def _get_url_for_attachment(
base_url = fs_storage.base_url_for_files
if not base_url:
return None
- if not exclude_base_url:
- base_url = base_url.replace(base_url, "") or "/"
- parts = [base_url, attachment.fs_filename]
- return "/".join([x.rstrip("/") for x in parts if x])
+ if exclude_base_url:
+ base_url = base_url.replace(fs_storage.base_url, "") or "/"
+ # always remove the directory_path from the fs_filename,
+ # but only if it's at the start of the filename
+ fs_filename = attachment.fs_filename
+ if fs_filename.startswith(fs_storage.directory_path):
+ fs_filename = fs_filename.replace(fs_storage.directory_path, "")
+ parts = [base_url, fs_filename]
+ return self._normalize_url("/".join(parts))
+
+ @api.model
+ def _normalize_url(self, url: str) -> str:
+ """Normalize the URL
+
+ :param url: the URL to normalize
+ :return: the normalized URL
+ Remove all the double slashes and the trailing slash, except if the URL
+ is only a slash (in this case we return a single slash). The double slash
+ in the protocol part of the URL is preserved.
+ """
+ if url == "/":
+ return url
+ parts = url.split("/")
+ parts = [x for x in parts if x]
+ if not parts:
+ return "/"
+ if parts[0].endswith(":"):
+ parts[0] = parts[0] + "/"
+ else:
+ # we preserve the leading slash if the URL is absolute
+ parts[0] = "/" + parts[0]
+ return "/".join(parts)
+
+ def recompute_urls(self) -> None:
+ """Recompute the URL of all attachments since the base_url or the
+ directory_path has changed. This method must be explicitly called
+ by the user since we don't want to recompute the URL on each change
+ of the base_url or directory_path. We could also have cases where such
+ a recompute is not wanted. For example, when you restore a database
+ from production to staging, you don't want to recompute the URL of
+ the attachments created in production (since the directory_path used
+ in production is readonly for the staging database) but you change the
+ directory_path of the staging database to ensure that all the modifications
+ in staging are done in a different directory and will not impact the
+ production.
+ """
+ attachments = self.env["ir.attachment"].search(
+ [("fs_storage_id", "in", self.ids)]
+ )
+ attachments._compute_fs_url()
+ attachments._compute_fs_url_path()
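+
+
+# A minimal configuration sketch (assuming an existing fs.storage with code
+# "tmp_dir"): mark it as the default storage for attachments and keep small
+# images and assets in the database, as described by the fields above.
+#
+#   storage = env["fs.storage"].search([("code", "=", "tmp_dir")], limit=1)
+#   storage.write(
+#       {
+#           "use_as_default_for_attachments": True,
+#           "force_db_for_default_attachment_rules": (
+#               '{"image/": 51200, "application/javascript": 0, "text/css": 0}'
+#           ),
+#       }
+#   )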
diff --git a/fs_attachment/models/ir_attachment.py b/fs_attachment/models/ir_attachment.py
index 19b3e4030d..b4755502b3 100644
--- a/fs_attachment/models/ir_attachment.py
+++ b/fs_attachment/models/ir_attachment.py
@@ -17,7 +17,6 @@
import odoo
from odoo import _, api, exceptions, fields, models
from odoo.osv.expression import AND, OR, normalize_domain
-from odoo.tools.safe_eval import const_eval
from .strtobool import strtobool
@@ -26,6 +25,10 @@
REGEX_SLUGIFY = r"[^-a-z0-9_]+"
+FS_FILENAME_RE_PARSER = re.compile(
+ r"^(?P.+)-(?P\d+)-(?P\d+)(?P\..+)$"
+)
+
def is_true(strval):
return bool(strtobool(strval or "0"))
@@ -137,32 +140,10 @@ def _is_storage_disabled(storage=None, log=True):
_logger.warning(msg)
return is_disabled
- @property
- def _object_storage_default_force_db_config(self):
- return {"image/": 51200, "application/javascript": 0, "text/css": 0}
-
def _get_storage_force_db_config(self):
- param = (
- self.env["ir.config_parameter"]
- .sudo()
- .get_param(
- "ir_attachment.storage.force.database",
- )
+ return self.env["fs.storage"].get_force_db_for_default_attachment_rules(
+ self._storage()
)
- storage_config = None
- if param:
- try:
- storage_config = const_eval(param)
- except (SyntaxError, TypeError, ValueError):
- _logger.exception(
- "Could not parse system parameter"
- " 'ir_attachment.storage.force.database', reverting to the"
- " default configuration."
- )
-
- if not storage_config:
- storage_config = self._object_storage_default_force_db_config
- return storage_config
def _store_in_db_instead_of_object_storage_domain(self):
"""Return a domain for attachments that must be forced to DB
@@ -256,6 +237,14 @@ def _get_datas_related_values(self, data, mimetype):
###########################################################
# Odoo methods that we override to use the object storage #
###########################################################
+ @api.model
+ def _storage(self):
+ # We check if a filesystem storage is configured for attachments
+ storage = self.env["fs.storage"].get_default_storage_code_for_attachments()
+ if not storage:
+ # If not, we use the default storage configured in Odoo
+ storage = super()._storage()
+ return storage
@api.model_create_multi
def create(self, vals_list):
@@ -428,17 +417,11 @@ def _is_fs_filename_meaningful(self, filename: str) -> bool:
"""Return True if the filename is meaningful
A filename is meaningful if it's formatted as <name>-<res_id>-<version>.<extension>
"""
- if not filename:
+ parsed = self._parse_fs_filename(filename)
+ if not parsed:
return False
- filename = os.path.basename(filename)
- re_fs_filename_parser = re.compile(
- r"^(?P.+)-(?P\d+)-(?P\d+)(?P\..+)$"
- )
- match = re_fs_filename_parser.match(filename)
- if not match:
- return False
- name, res_id, version, extension = match.groups()
- return bool(name and res_id and version and extension)
+ name, res_id, version, extension = parsed
+ return bool(name and res_id and version is not None and extension)
@api.model
def _parse_fs_filename(self, filename: str) -> tuple[str, int, int, str] | None:
@@ -446,12 +429,9 @@ def _parse_fs_filename(self, filename: str) -> tuple[str, int, int, str] | None:
<name>-<res_id>-<version>.<extension>
"""
if not filename:
- return "", "", "", ""
+ return None
filename = os.path.basename(filename)
- re_fs_filename_parser = re.compile(
- r"^(?P.+)-(?P\d+)-(?P\d+)(?P\..+)$"
- )
- match = re_fs_filename_parser.match(filename)
+ match = FS_FILENAME_RE_PARSER.match(filename)
if not match:
return None
name, res_id, version, extension = match.groups()
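
As a quick illustration (not part of the patch), this is how a filename is
parsed with the module-level pattern above. The named groups shown here are a
reconstruction and therefore an assumption; only their order matters for the
unpacking used in the code:

.. code-block:: python

    import re

    FS_FILENAME_RE_PARSER = re.compile(
        r"^(?P<name>.+)-(?P<res_id>\d+)-(?P<version>\d+)(?P<extension>\..+)$"
    )

    match = FS_FILENAME_RE_PARSER.match("invoice-42-3.pdf")
    # groups in order: name, res_id, version, extension
    assert match.groups() == ("invoice", "42", "3", ".pdf")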
diff --git a/fs_attachment/tests/test_fs_attachment.py b/fs_attachment/tests/test_fs_attachment.py
index c653c1d983..c49ad3fc38 100644
--- a/fs_attachment/tests/test_fs_attachment.py
+++ b/fs_attachment/tests/test_fs_attachment.py
@@ -210,9 +210,7 @@ def test_fs_create_transactional(self):
"""In this test we check that if a rollback is done on a create
The file is removed
"""
- self.env["ir.config_parameter"].sudo().set_param(
- "ir_attachment.location", "tmp_dir"
- )
+ self.temp_backend.use_as_default_for_attachments = True
content = b"Transactional create"
try:
@@ -261,6 +259,26 @@ def test_no_gc_if_disabled_on_storage(self):
store_fname, self.gc_file_model.search([]).mapped("store_fname")
)
+ def test_attachment_fs_url(self):
+ self.temp_backend.base_url = "https://acsone.eu/media"
+ self.env["ir.config_parameter"].sudo().set_param(
+ "ir_attachment.location", "tmp_dir"
+ )
+ content = b"Transactional update"
+ attachment = self.env["ir.attachment"].create(
+ {"name": "test.txt", "raw": content}
+ )
+ self.env.flush_all()
+ attachment_path = f"/test-{attachment.id}-0.txt"
+ self.assertEqual(attachment.fs_url, f"https://acsone.eu/media{attachment_path}")
+ self.assertEqual(attachment.fs_url_path, attachment_path)
+
+ self.temp_backend.is_directory_path_in_url = True
+ self.temp_backend.recompute_urls()
+ attachment_path = f"{self.temp_dir}/test-{attachment.id}-0.txt"
+ self.assertEqual(attachment.fs_url, f"https://acsone.eu/media{attachment_path}")
+ self.assertEqual(attachment.fs_url_path, attachment_path)
+
class MyException(Exception):
"""Exception to be raised into tests ensure that we trap only this
From e424ee94458ca630c965690b41801f83cd52f5a0 Mon Sep 17 00:00:00 2001
From: "Laurent Mignon (ACSONE)"
Date: Mon, 17 Apr 2023 15:00:57 +0200
Subject: [PATCH 34/47] =?UTF-8?q?add=20tests=20and=20fixes=C3=A9?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
fs_attachment/models/fs_storage.py | 8 ++-
fs_attachment/models/ir_attachment.py | 57 +++++++++++-----
fs_attachment/tests/test_fs_attachment.py | 83 +++++++++++++++++++----
fs_attachment/views/fs_storage.xml | 7 ++
4 files changed, 120 insertions(+), 35 deletions(-)
diff --git a/fs_attachment/models/fs_storage.py b/fs_attachment/models/fs_storage.py
index fc98327fb3..4848536652 100644
--- a/fs_attachment/models/fs_storage.py
+++ b/fs_attachment/models/fs_storage.py
@@ -89,12 +89,14 @@ def _onchange_use_as_default_for_attachments(self):
def create(self, vals_list):
for vals in vals_list:
if not vals.get("use_as_default_for_attachments"):
- vals["force_db_for_default_attachment_rules"] = ""
+ vals["force_db_for_default_attachment_rules"] = None
return super().create(vals_list)
def write(self, vals):
- if not vals.get("use_as_default_for_attachments"):
- vals["force_db_for_default_attachment_rules"] = ""
+ if "use_as_default_for_attachments" in vals:
+ if not vals["use_as_default_for_attachments"]:
+ vals["force_db_for_default_attachment_rules"] = None
+ return super().write(vals)
return super().write(vals)
@api.constrains(
diff --git a/fs_attachment/models/ir_attachment.py b/fs_attachment/models/ir_attachment.py
index b4755502b3..41aecd9b16 100644
--- a/fs_attachment/models/ir_attachment.py
+++ b/fs_attachment/models/ir_attachment.py
@@ -15,7 +15,8 @@
from slugify import slugify # pylint: disable=missing-manifest-dependency
import odoo
-from odoo import _, api, exceptions, fields, models
+from odoo import _, api, fields, models
+from odoo.exceptions import AccessError, UserError
from odoo.osv.expression import AND, OR, normalize_domain
from .strtobool import strtobool
@@ -186,8 +187,8 @@ def _store_in_db_instead_of_object_storage(self, data, mimetype):
* better portability of a database: when replicating a production
instance for dev, the assets are included
- The configuration can be modified in the ir.config_parameter
- ``ir_attachment.storage.force.database``, as a dictionary, for
+ The configuration can be modified on the fs.storage record, in the
+ field ``force_db_for_default_attachment_rules``, as a dictionary, for
instance::
{"image/": 51200, "application/javascript": 0, "text/css": 0}
@@ -196,15 +197,12 @@ def _store_in_db_instead_of_object_storage(self, data, mimetype):
value is the limit in size below which attachments are kept in DB.
0 means no limit.
- Default configuration means:
+ These limits are applied only if the storage is the default one for
+ attachments (see ``_storage``).
- * images mimetypes (image/png, image/jpeg, ...) below 51200 bytes are
- stored in database
- * application/javascript are stored in database whatever their size
- * text/css are stored in database whatever their size
-
- The conditions must be inline with the domain in
- ``_store_in_db_instead_of_object_storage_domain``.
+ The same conditions are also applied in the domain returned by
+ ``_store_in_db_instead_of_object_storage_domain``, which is used to move
+ records from a filesystem storage to the database.
"""
if self._is_storage_disabled():
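
To make the rule format concrete, here is a rough sketch of the decision the
dictionary encodes (illustration only; the actual logic lives in
``_store_in_db_instead_of_object_storage`` and its ``_domain`` counterpart):

.. code-block:: python

    def keep_in_db(rules: dict, mimetype: str, size: int) -> bool:
        """Return True if an attachment should stay in the database."""
        for prefix, limit in rules.items():
            if mimetype.startswith(prefix):
                # 0 means "no size limit": always keep in the database
                return limit == 0 or size <= limit
        return False

    rules = {"image/": 51200, "application/javascript": 0, "text/css": 0}
    keep_in_db(rules, "image/png", 10_000)    # True, small image
    keep_in_db(rules, "image/png", 200_000)   # False, above the 50KB limit
    keep_in_db(rules, "text/css", 1_000_000)  # True, no limit for css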
@@ -252,6 +250,31 @@ def create(self, vals_list):
attachments._enforce_meaningful_storage_filename()
return attachments
+ def write(self, vals):
+ if ("datas" in vals or "raw" in vals) and not (
+ "name" in vals or "mimetype" in vals
+ ):
+ # When we write on an attachment, if the mimetype is not provided, it
+ # will be computed from the name. The problem is that if you assign a
+ # value to the field ``datas`` or ``raw``, the name is not provided
+ # nor the mimetype, so the mimetype will be set to ``application/octet-
+ # stream``.
+ # We want to avoid this, so we take the mimetype of the first attachment
+ # and we set it on all the attachments if they all have the same mimetype.
+ # If they don't have the same mimetype, we raise an error.
+ # OPW-3277070
+ mimetypes = self.mapped("mimetype")
+ if len(set(mimetypes)) == 1:
+ vals["mimetype"] = mimetypes[0]
+ else:
+ raise UserError(
+ _(
+ "You can't write on multiple attachments with different "
+ "mimetypes at the same time."
+ )
+ )
+ return super().write(vals)
+
@api.model
def _file_read(self, fname):
if self._is_file_from_a_storage(fname):
@@ -609,9 +632,7 @@ def _move_attachment_to_store(self):
@api.model
def force_storage(self):
if not self.env["res.users"].browse(self.env.uid)._is_admin():
- raise exceptions.AccessError(
- _("Only administrators can execute this action.")
- )
+ raise AccessError(_("Only administrators can execute this action."))
location = self.env.context.get("storage_location") or self._storage()
if location not in self._get_storage_codes():
return super().force_storage()
@@ -675,10 +696,12 @@ def force_storage_to_db_for_special_fields(self, new_cr=False):
# this write will read the datas from the Object Storage and
# write them back in the DB (the logic for location to write is
# in the 'datas' inverse computed field)
+ # the mimetype must be preserved as well: on each write the
+ # mimetype is recomputed when not given, and since neither the
+ # mimetype nor the name is passed here, it would otherwise be
+ # reset to 'application/octet-stream' on assets
attachment.write({"datas": attachment.datas})
- # as the file will potentially be dropped on the bucket,
- # we should commit the changes here
- new_env.cr.commit()
if current % 100 == 0 or total - current == 0:
_logger.info(
"attachment %s/%s after %.2fs",
diff --git a/fs_attachment/tests/test_fs_attachment.py b/fs_attachment/tests/test_fs_attachment.py
index c49ad3fc38..9ed257d15c 100644
--- a/fs_attachment/tests/test_fs_attachment.py
+++ b/fs_attachment/tests/test_fs_attachment.py
@@ -3,6 +3,7 @@
import os
import shutil
import tempfile
+from unittest import mock
from odoo.tests.common import TransactionCase
@@ -24,6 +25,7 @@ def setUpClass(cls):
)
cls.temp_dir = temp_dir
cls.gc_file_model = cls.env["fs.file.gc"]
+ cls.ir_attachment_model = cls.env["ir.attachment"]
@cls.addClassCleanup
def cleanup_tempdir():
@@ -49,6 +51,7 @@ def test_create_attachment_explicit_location(self):
self.assertEqual(os.listdir(self.temp_dir), [f"test-{attachment.id}-0.txt"])
self.assertEqual(attachment.raw, content)
self.assertFalse(attachment.db_datas)
+ self.assertEqual(attachment.mimetype, "text/plain")
with attachment.open("rb") as f:
self.assertEqual(f.read(), content)
@@ -63,11 +66,12 @@ def test_create_attachment_explicit_location(self):
def test_open_attachment_in_db(self):
self.env["ir.config_parameter"].sudo().set_param("ir_attachment.location", "db")
content = b"This is a test attachment in db"
- attachment = self.env["ir.attachment"].create(
+ attachment = self.ir_attachment_model.create(
{"name": "test.txt", "raw": content}
)
self.assertFalse(attachment.store_fname)
self.assertTrue(attachment.db_datas)
+ self.assertEqual(attachment.mimetype, "text/plain")
with attachment.open("rb") as f:
self.assertEqual(f.read(), content)
with self.assertRaisesRegex(SystemError, "Write mode is not supported"):
@@ -78,7 +82,7 @@ def test_attachment_open_in_filestore(self):
"ir_attachment.location", "file"
)
content = b"This is a test attachment in filestore"
- attachment = self.env["ir.attachment"].create(
+ attachment = self.ir_attachment_model.create(
{"name": "test.txt", "raw": content}
)
self.assertTrue(attachment.store_fname)
@@ -95,16 +99,15 @@ def test_attachment_open_in_filestore(self):
self.assertEqual(attachment.raw, b"new")
def test_default_attachment_store_in_fs(self):
- self.env["ir.config_parameter"].sudo().set_param(
- "ir_attachment.location", "tmp_dir"
- )
+ self.temp_backend.use_as_default_for_attachments = True
content = b"This is a test attachment in filestore tmp_dir"
- attachment = self.env["ir.attachment"].create(
+ attachment = self.ir_attachment_model.create(
{"name": "test.txt", "raw": content}
)
self.assertTrue(attachment.store_fname)
self.assertFalse(attachment.db_datas)
self.assertEqual(attachment.raw, content)
+ self.assertEqual(attachment.mimetype, "text/plain")
self.env.flush_all()
initial_filename = f"test-{attachment.id}-0.txt"
@@ -129,6 +132,7 @@ def test_default_attachment_store_in_fs(self):
self.assertEqual(
attachment.store_fname, f"tmp_dir://{self.temp_dir}/{new_filename}"
)
+ self.assertEqual(attachment.mimetype, "text/plain")
# the original file is to be deleted by the GC
self.assertEqual(
@@ -152,11 +156,9 @@ def test_fs_update_transactionnal(self):
"""In this test we check that if a rollback is done on an update
The original content is preserved
"""
- self.env["ir.config_parameter"].sudo().set_param(
- "ir_attachment.location", "tmp_dir"
- )
+ self.temp_backend.use_as_default_for_attachments = True
content = b"Transactional update"
- attachment = self.env["ir.attachment"].create(
+ attachment = self.ir_attachment_model.create(
{"name": "test.txt", "raw": content}
)
self.env.flush_all()
@@ -191,6 +193,7 @@ def test_fs_update_transactionnal(self):
self.assertEqual(attachment.store_fname, f"tmp_dir://{initial_filename}")
self.assertEqual(attachment.fs_filename, initial_filename)
self.assertEqual(attachment.raw, content)
+ self.assertEqual(attachment.mimetype, "text/plain")
self.assertEqual(
set(os.listdir(self.temp_dir)),
{os.path.basename(initial_filename), os.path.basename(new_filename)},
@@ -215,7 +218,7 @@ def test_fs_create_transactional(self):
try:
with self.env.cr.savepoint():
- attachment = self.env["ir.attachment"].create(
+ attachment = self.ir_attachment_model.create(
{"name": "test.txt", "raw": content}
)
self.env.flush_all()
@@ -261,11 +264,9 @@ def test_no_gc_if_disabled_on_storage(self):
def test_attachment_fs_url(self):
self.temp_backend.base_url = "https://acsone.eu/media"
- self.env["ir.config_parameter"].sudo().set_param(
- "ir_attachment.location", "tmp_dir"
- )
+ self.temp_backend.use_as_default_for_attachments = True
content = b"Transactional update"
- attachment = self.env["ir.attachment"].create(
+ attachment = self.ir_attachment_model.create(
{"name": "test.txt", "raw": content}
)
self.env.flush_all()
@@ -279,6 +280,58 @@ def test_attachment_fs_url(self):
self.assertEqual(attachment.fs_url, f"https://acsone.eu/media{attachment_path}")
self.assertEqual(attachment.fs_url_path, attachment_path)
+ def test_force_attachment_in_db_rules(self):
+ self.temp_backend.use_as_default_for_attachments = True
+ # force storage in db for text/plain
+ self.temp_backend.force_db_for_default_attachment_rules = '{"text/plain": 0}'
+ attachment = self.ir_attachment_model.create(
+ {"name": "test.txt", "raw": b"content"}
+ )
+ self.env.flush_all()
+ self.assertFalse(attachment.store_fname)
+ self.assertEqual(attachment.db_datas, b"content")
+ self.assertEqual(attachment.mimetype, "text/plain")
+
+ def test_force_storage_to_db(self):
+ self.temp_backend.use_as_default_for_attachments = True
+ attachment = self.ir_attachment_model.create(
+ {"name": "test.txt", "raw": b"content"}
+ )
+ self.env.flush_all()
+ self.assertTrue(attachment.store_fname)
+ self.assertFalse(attachment.db_datas)
+ store_fname = attachment.store_fname
+ # we change the rules to force the storage in db for text/plain
+ self.temp_backend.force_db_for_default_attachment_rules = '{"text/plain": 0}'
+ attachment.force_storage_to_db_for_special_fields()
+ self.assertFalse(attachment.store_fname)
+ self.assertEqual(attachment.db_datas, b"content")
+ # we check that the file is marked for GC
+ gc_files = self.gc_file_model.search([]).mapped("store_fname")
+ self.assertIn(store_fname, gc_files)
+
+ def test_force_storage_to_fs(self):
+ attachment = self.ir_attachment_model.create(
+ {"name": "test.txt", "raw": b"content"}
+ )
+ self.env.flush_all()
+ fs_path = self.ir_attachment_model._filestore() + "/" + attachment.store_fname
+ self.assertTrue(os.path.exists(fs_path))
+ self.assertEqual(os.listdir(self.temp_dir), [])
+ # we decide to force the storage in the filestore
+ self.temp_backend.use_as_default_for_attachments = True
+ with mock.patch.object(self.env.cr, "commit"), mock.patch(
+ "odoo.addons.fs_attachment.models.ir_attachment.clean_fs"
+ ) as clean_fs:
+ self.ir_attachment_model.force_storage()
+ clean_fs.assert_called_once()
+ # files into the filestore must be moved to our filesystem storage
+ filename = f"test-{attachment.id}-0.txt"
+ self.assertEqual(
+ attachment.store_fname, f"tmp_dir://{self.temp_dir}/{filename}"
+ )
+ self.assertIn(filename, os.listdir(self.temp_dir))
+
class MyException(Exception):
"""Exception to be raised into tests ensure that we trap only this
diff --git a/fs_attachment/views/fs_storage.xml b/fs_attachment/views/fs_storage.xml
index 686e00ec2d..303aebdd92 100644
--- a/fs_attachment/views/fs_storage.xml
+++ b/fs_attachment/views/fs_storage.xml
@@ -12,6 +12,13 @@
+
+
From 528fd2c38aa36298a745f92f4307a87f9991fe22 Mon Sep 17 00:00:00 2001
From: "Laurent Mignon (ACSONE)"
Date: Mon, 17 Apr 2023 16:48:47 +0200
Subject: [PATCH 35/47] add doc and fix deletion
---
fs_attachment/README.rst | 238 ++++++--
fs_attachment/models/fs_file_gc.py | 12 +-
fs_attachment/models/ir_attachment.py | 7 +-
fs_attachment/readme/DESCRIPTION.rst | 17 +
fs_attachment/readme/USAGE.rst | 116 +++-
fs_attachment/static/description/index.html | 568 ++++++++++++++++++++
fs_attachment/tests/test_fs_attachment.py | 38 ++
fs_attachment/views/fs_storage.xml | 4 +
8 files changed, 937 insertions(+), 63 deletions(-)
create mode 100644 fs_attachment/static/description/index.html
diff --git a/fs_attachment/README.rst b/fs_attachment/README.rst
index 0ff25c997c..e714b35d3b 100644
--- a/fs_attachment/README.rst
+++ b/fs_attachment/README.rst
@@ -1,46 +1,220 @@
-Base class for attachments on external object store
-===================================================
+============================
+Base Attachment Object Store
+============================
-This is a base addon that regroup common code used by addons targeting specific object store
+.. !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ !! This file is generated by oca-gen-addon-readme !!
+ !! changes will be overwritten. !!
+ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+.. |badge1| image:: https://img.shields.io/badge/maturity-Beta-yellow.png
+ :target: https://odoo-community.org/page/development-status
+ :alt: Beta
+.. |badge2| image:: https://img.shields.io/badge/licence-AGPL--3-blue.png
+ :target: http://www.gnu.org/licenses/agpl-3.0-standalone.html
+ :alt: License: AGPL-3
+.. |badge3| image:: https://img.shields.io/badge/github-OCA%2Fstorage-lightgray.png?logo=github
+ :target: https://github.com/OCA/storage/tree/16.0/fs_attachment
+ :alt: OCA/storage
+.. |badge4| image:: https://img.shields.io/badge/weblate-Translate%20me-F47D42.png
+ :target: https://translation.odoo-community.org/projects/storage-16-0/storage-16-0-fs_attachment
+ :alt: Translate me on Weblate
+.. |badge5| image:: https://img.shields.io/badge/runbot-Try%20me-875A7B.png
+ :target: https://runbot.odoo-community.org/runbot/275/16.0
+ :alt: Try me on Runbot
+
+|badge1| |badge2| |badge3| |badge4| |badge5|
+
+In some cases, you need to store attachments in another system than Odoo's
+filestore. For example, when your deployment is based on a multi-server
+architecture to ensure redundancy and scalability, your attachments must
+be stored in a way that makes them accessible from all the servers. To do
+so, you can use a shared storage system like NFS, an S3-compliant cloud
+storage, and so on.
+
+This addon extends the storage mechanism of Odoo's attachments to allow
+you to store them in any filesystem storage supported by the Python
+library `fsspec `_ and made
+available via the `fs_storage` addon.
+
+In contrast to Odoo, when a file is stored in an external storage, this
+addon ensures that the filename keeps its meaning (in Odoo the filename
+in the filestore is the file content checksum). Concretely, the filename
+is based on the pattern:
+'<name-without-extension>-<attachment-id>-<version>.<extension>'
+
+Last but not least, this addon adds 2 new fields on the attachments, used
+to retrieve the file content from a URL:
+
+* ``Internal URL``: URL to retrieve the file content from the Odoo's
+ filestore.
+* ``Filesystem URL``: URL to retrieve the file content from the external
+ storage.
+
+.. note::
+
+ The internal URL is always available, but the filesystem URL is only
+ available when the attachment is stored in an external storage.
+ Particular attention has been paid to limit as much as possible the consumption
+ of resources necessary to serve via Odoo the content stored in an external
+ filesystem. The implementation is based on an end-to-end streaming of content
+ between the external filesystem and the Odoo client application.
+
+**Table of contents**
+
+.. contents::
+ :local:
+
+Usage
+=====
Configuration
--------------
+~~~~~~~~~~~~~
+
+The configuration is done through the creation of a filesystem storage record
+in Odoo. To create a new storage, go to the menu
+``Settings > Technical > FS Storage`` and click on ``Create``.
+
+In addition to the common fields available to configure a storage, specific
+fields are available under the section 'Attachment' to configure the way
+attachments will be stored in the filesystem.
+
+* ``Optimizes Directory Path``: This option is useful if you need to prevent
+ having too many files in a single directory. It will create a directory
+ structure based on the attachment's checksum (with 2 levels of depth).
+ For example, if the checksum is ``123456789``, the file will be stored in the
+ directory ``/path/to/storage/12/34/my_file-1-0.txt``.
+* ``Autovacuum GC``: This is used to automatically remove files from the filesystem
+ when they are no longer referenced in Odoo. Some storage backends (like S3) may
+ charge you for the storage of files, so it's important to remove them when
+ they're no longer needed. In some cases, this option is not desirable, for
+ example if you're using a storage backend to store images shared with other
+ systems (like your website) and you don't want to remove the files from the
+ storage while they're still referenced in those other systems.
+ This mechanism is based on a ``fs.file.gc`` model used to collect the files
+ to remove. This model is automatically populated by the ``ir.attachment``
+ model when a file is removed from the database. If you disable this option,
+ you'll have to manually take care of the records in ``fs.file.gc`` for
+ your filesystem storage.
+* ``Use As Default For Attachment``: This option allows you to declare the storage
+ as the default one for attachments. If you have multiple filesystem storages
+ configured, you can choose which one will be used by default for attachments.
+ Once activated, attachments created without specifying a storage will be
+ stored in this default storage.
+* ``Force DB For Default Attachment Rules``: This option is useful if you want to
+ force the storage of some attachments in the database, even if you have a
+ default filesystem storage configured. This is especially useful when you're
+ using a storage backend like S3, where the latency of the network can be
+ high. This option is a JSON field that allows you to define the mimetypes and
+ the size limit below which the attachments will be stored in the database
+ (see the configuration sketch after this list).
+
+ Small images (128, 256) are used in Odoo in list / kanban views. We
+ want them to be fast to read.
+ They are generally < 50KB (default configuration) so they don't take
+ that much space in database, but they'll be read much faster than from
+ the object storage.
+
+ The assets (application/javascript, text/css) are stored in database
+ as well whatever their size is:
+
+ * a database doesn't have thousands of them
+ * of course better for performance
+ * better portability of a database: when replicating a production
+ instance for dev, the assets are included
+
+ The default configuration is:
+
+ {"image/": 51200, "application/javascript": 0, "text/css": 0}
+
+ Where the key is the beginning of the mimetype to configure and the
+ value is the limit in size below which attachments are kept in DB.
+ 0 means no limit.
+
+ Default configuration means:
+
+ * images mimetypes (image/png, image/jpeg, ...) below 50KB are
+ stored in database
+ * application/javascript are stored in database whatever their size
+ * text/css are stored in database whatever their size
+
+ This option is only available on the filesystem storage that is used
+ as default for attachments.
+
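
For example (a sketch; the storage record and its ``code`` are hypothetical),
the same configuration can be set from code on an existing ``fs.storage``
record:

.. code-block:: python

    storage = env["fs.storage"].search([("code", "=", "my_storage")], limit=1)
    storage.use_as_default_for_attachments = True
    # keep small images and the web assets in the database
    storage.force_db_for_default_attachment_rules = (
        '{"image/": 51200, "application/javascript": 0, "text/css": 0}'
    )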
+Another key feature of this module is the ability to get access to the attachments
+from URLs.
+
+* ``Base URL``: This is the base URL used to access the attachments from the
+ filesystem storage itself.
+* ``Is Directory Path In URL``: Normally the directory path configured on the storage
+ is not included in the URL. If you want to include it, you can activate this option.
+
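
To illustrate (the values are hypothetical), for a storage with ``Base URL``
set to ``https://cdn.example.com/media`` and an attachment stored as
``test-42-0.txt``:

.. code-block:: python

    attachment.fs_url       # "https://cdn.example.com/media/test-42-0.txt"
    attachment.fs_url_path  # "/test-42-0.txt"

    # with "Is Directory Path In URL" enabled and directory_path = "odoo-prod"
    attachment.fs_url       # "https://cdn.example.com/media/odoo-prod/test-42-0.txt"
    attachment.fs_url_path  # "/odoo-prod/test-42-0.txt"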
+Tips & Tricks
+~~~~~~~~~~~~~
+
+* When working in multi-staging environments, the management of the attachments
+ can be tricky. For example, if you have a production instance and a staging
+ instance based on a backup of the production environment, you may want to have
+ the attachments shared between the two instances BUT you don't want to have
+ one instance removing or modifying the attachments of the other instance.
+
+ To do so, you can configure the same filesystem storage on both instances and
+ use a different directory path. (For S3 storage, directory path is the bucket
+ name). When a file is written in the filesystem storage, it's always written into
+ the directory path configured on the storage and the full path of the file is stored
+ in the database. When reading a file, it's always read from the full path stored
+ in the database. So if you have two instances using the same storage with different
+ directory paths, files written in each instance will be stored in different
+ directories but be accessible from the other instance. A check is also done when
+ an attachment is removed to ensure that only files stored in the current directory
+ path are removed.
+
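
A minimal sketch of that setup on the staging database (the storage record and
directory names are hypothetical):

.. code-block:: python

    storage = env["fs.storage"].search([("code", "=", "my_storage")], limit=1)
    # production keeps e.g. directory_path = "production"; staging writes its
    # own modifications elsewhere and can no longer delete production files
    storage.directory_path = "staging"
    # only call this if you also want existing attachment URLs to be rebuilt
    # storage.recompute_urls()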
+Bug Tracker
+===========
+
+Bugs are tracked on `GitHub Issues `_.
+In case of trouble, please check there if your issue has already been reported.
+If you spotted it first, help us smashing it by providing a detailed and welcomed
+`feedback `_.
+
+Do not contact contributors directly about support or help with technical issues.
-Object storage may be slow, and for this reason, we want to store
-some files in the database whatever.
+Credits
+=======
-Small images (128, 256) are used in Odoo in list / kanban views. We
-want them to be fast to read.
-They are generally < 50KB (default configuration) so they don't take
-that much space in database, but they'll be read much faster than from
-the object storage.
+Authors
+~~~~~~~
-The assets (application/javascript, text/css) are stored in database
-as well whatever their size is:
+* Camptocamp
+* ACSONE SA/NV
-* a database doesn't have thousands of them
-* of course better for performance
-* better portability of a database: when replicating a production
- instance for dev, the assets are included
+Contributors
+~~~~~~~~~~~~
-This storage configuration can be modified in the system parameter
-``ir_attachment.storage.force.database``, as a JSON value, for instance::
+Thierry Ducrest
+Guewen Baconnier
+Julien Coux
+Akim Juillerat
+Thomas Nowicki
+Vincent Renaville
+Denis Leemann
+Patrick Tombez
+Don Kendall
+Stephane Mangin
+Laurent Mignon
- {"image/": 51200, "application/javascript": 0, "text/css": 0}
+Maintainers
+~~~~~~~~~~~
-Where the key is the beginning of the mimetype to configure and the
-value is the limit in size below which attachments are kept in DB.
-0 means no limit.
+This module is maintained by the OCA.
-Default configuration means:
+.. image:: https://odoo-community.org/logo.png
+ :alt: Odoo Community Association
+ :target: https://odoo-community.org
-* images mimetypes (image/png, image/jpeg, ...) below 50KB are
- stored in database
-* application/javascript are stored in database whatever their size
-* text/css are stored in database whatever their size
+OCA, or the Odoo Community Association, is a nonprofit organization whose
+mission is to support the collaborative development of Odoo features and
+promote its widespread use.
-Disable attachment storage I/O
-------------------------------
+This module is part of the `OCA/storage `_ project on GitHub.
-Define a environment variable `DISABLE_ATTACHMENT_STORAGE` set to `1`
-This will prevent any kind of exceptions and read/write on storage attachments.
+You are welcome to contribute. To learn how please visit https://odoo-community.org/page/Contribute.
diff --git a/fs_attachment/models/fs_file_gc.py b/fs_attachment/models/fs_file_gc.py
index 1e211da392..72d5ec3f3c 100644
--- a/fs_attachment/models/fs_file_gc.py
+++ b/fs_attachment/models/fs_file_gc.py
@@ -147,13 +147,23 @@ def _gc_files_unsafe(self) -> None:
(tuple(codes),),
)
for code, store_fnames in self._cr.fetchall():
+ storage = self.env["fs.storage"].get_by_code(code)
fs = self.env["fs.storage"].get_fs_by_code(code, root=True)
for store_fname in store_fnames:
try:
file_path = store_fname.partition("://")[2]
+ if storage.directory_path and not file_path.startswith(
+ storage.directory_path
+ ):
+ _logger.debug(
+ "File %s is not in the storage directory %s",
+ store_fname,
+ storage.directory_path,
+ )
+ continue
fs.rm(file_path)
except Exception:
- _logger.info("Failed to remove file %s", store_fname)
+ _logger.debug("Failed to remove file %s", store_fname)
# delete the records from the table fs_file_gc
self._cr.execute(
diff --git a/fs_attachment/models/ir_attachment.py b/fs_attachment/models/ir_attachment.py
index 41aecd9b16..6fcc4dc6f8 100644
--- a/fs_attachment/models/ir_attachment.py
+++ b/fs_attachment/models/ir_attachment.py
@@ -337,7 +337,12 @@ def _storage_file_write(self, bin_data: bytes) -> str:
@api.model
def _storage_file_delete(self, fname):
- """Delete the file from the filesystem storage"""
+ """Delete the file from the filesystem storage
+
+ It's safe to use the fname (the store_fname) to delete the file because,
+ even if it's the full path to the file, the GC will only delete files
+ that belong to the configured storage directory path.
+ """
self._fs_mark_for_gc(fname)
@api.model
diff --git a/fs_attachment/readme/DESCRIPTION.rst b/fs_attachment/readme/DESCRIPTION.rst
index 1e32edad54..4d981652a7 100644
--- a/fs_attachment/readme/DESCRIPTION.rst
+++ b/fs_attachment/readme/DESCRIPTION.rst
@@ -15,3 +15,20 @@ addon ensures that the filename keeps its meaning (In odoo the filename
into the filestore is the file content checksum). Concretely the filename
is based on the pattern:
'<name-without-extension>-<attachment-id>-<version>.<extension>'
+
+Last but not least, this addon adds 2 new fields on the attachments, used
+to retrieve the file content from a URL:
+
+* ``Internal URL``: URL to retrieve the file content from the Odoo's
+ filestore.
+* ``Filesystem URL``: URL to retrieve the file content from the external
+ storage.
+
+.. note::
+
+ The internal URL is always available, but the filesystem URL is only
+ available when the attachment is stored in an external storage.
+ Particular attention has been paid to limit as much as possible the consumption
+ of resources necessary to serve via Odoo the content stored in an external
+ filesystem. The implementation is based on an end-to-end streaming of content
+ between the external filesystem and the Odoo client application.
diff --git a/fs_attachment/readme/USAGE.rst b/fs_attachment/readme/USAGE.rst
index a7477c2fdd..b55dc338b0 100644
--- a/fs_attachment/readme/USAGE.rst
+++ b/fs_attachment/readme/USAGE.rst
@@ -1,41 +1,99 @@
Configuration
--------------
+~~~~~~~~~~~~~
-Object storage may be slow, and for this reason, we want to store
-some files in the database whatever.
+The configuration is done through the creation of a filesystem storage record
+in Odoo. To create a new storage, go to the menu
+``Settings > Technical > FS Storage`` and click on ``Create``.
-Small images (128, 256) are used in Odoo in list / kanban views. We
-want them to be fast to read.
-They are generally < 50KB (default configuration) so they don't take
-that much space in database, but they'll be read much faster than from
-the object storage.
+In addition to the common fields available to configure a storage, specific
+fields are available under the section 'Attachment' to configure the way
+attachments will be stored in the filesystem.
-The assets (application/javascript, text/css) are stored in database
-as well whatever their size is:
+* ``Optimizes Directory Path``: This option is useful if you need to prevent
+ having too many files in a single directory. It will create a directory
+ structure based on the attachment's checksum (with 2 levels of depth).
+ For example, if the checksum is ``123456789``, the file will be stored in the
+ directory ``/path/to/storage/12/34/my_file-1-0.txt``.
+* ``Autovacuum GC``: This is used to automatically remove files from the filesystem
+ when they are no longer referenced in Odoo. Some storage backends (like S3) may
+ charge you for the storage of files, so it's important to remove them when
+ they're no longer needed. In some cases, this option is not desirable, for
+ example if you're using a storage backend to store images shared with other
+ systems (like your website) and you don't want to remove the files from the
+ storage while they're still referenced in those other systems.
+ This mechanism is based on a ``fs.file.gc`` model used to collect the files
+ to remove. This model is automatically populated by the ``ir.attachment``
+ model when a file is removed from the database. If you disable this option,
+ you'll have to manually take care of the records in ``fs.file.gc`` for
+ your filesystem storage.
+* ``Use As Default For Attachment``: This option allows you to declare the storage
+ as the default one for attachments. If you have multiple filesystem storages
+ configured, you can choose which one will be used by default for attachments.
+ Once activated, attachments created without specifying a storage will be
+ stored in this default storage.
+* ``Force DB For Default Attachment Rules``: This option is useful if you want to
+ force the storage of some attachments in the database, even if you have a
+ default filesystem storage configured. This is especially useful when you're
+ using a storage backend like S3, where the latency of the network can be
+ high. This option is a JSON field that allows you to define the mimetypes and
+ the size limit below which the attachments will be stored in the database.
-* a database doesn't have thousands of them
-* of course better for performance
-* better portability of a database: when replicating a production
- instance for dev, the assets are included
+ Small images (128, 256) are used in Odoo in list / kanban views. We
+ want them to be fast to read.
+ They are generally < 50KB (default configuration) so they don't take
+ that much space in database, but they'll be read much faster than from
+ the object storage.
-This storage configuration can be modified in the system parameter
-``ir_attachment.storage.force.database``, as a JSON value, for instance::
+ The assets (application/javascript, text/css) are stored in database
+ as well whatever their size is:
- {"image/": 51200, "application/javascript": 0, "text/css": 0}
+ * a database doesn't have thousands of them
+ * of course better for performance
+ * better portability of a database: when replicating a production
+ instance for dev, the assets are included
-Where the key is the beginning of the mimetype to configure and the
-value is the limit in size below which attachments are kept in DB.
-0 means no limit.
+ The default configuration is:
-Default configuration means:
+ {"image/": 51200, "application/javascript": 0, "text/css": 0}
-* images mimetypes (image/png, image/jpeg, ...) below 50KB are
- stored in database
-* application/javascript are stored in database whatever their size
-* text/css are stored in database whatever their size
+ Where the key is the beginning of the mimetype to configure and the
+ value is the limit in size below which attachments are kept in DB.
+ 0 means no limit.
-Disable attachment storage I/O
-------------------------------
+ Default configuration means:
-Define a environment variable `DISABLE_ATTACHMENT_STORAGE` set to `1`
-This will prevent any kind of exceptions and read/write on storage attachments.
+ * images mimetypes (image/png, image/jpeg, ...) below 50KB are
+ stored in database
+ * application/javascript are stored in database whatever their size
+ * text/css are stored in database whatever their size
+
+ This option is only available on the filesystem storage that is used
+ as default for attachments.
+
+Another key feature of this module is the ability to get access to the attachments
+from URLs.
+
+* ``Base URL``: This is the base URL used to access the attachments from the
+ filesystem storage itself.
+* ``Is Directory Path In URL``: Normally the directory path configured on the storage
+ is not included in the URL. If you want to include it, you can activate this option.
+
+Tips & Tricks
+~~~~~~~~~~~~~
+
+* When working in multi-staging environments, the management of the attachments
+ can be tricky. For example, if you have a production instance and a staging
+ instance based on a backup of the production environment, you may want to have
+ the attachments shared between the two instances BUT you don't want to have
+ one instance removing or modifying the attachments of the other instance.
+
+ To do so, you can configure the same filesystem storage on both instances and
+ use a different directory path. (For S3 storage, directory path is the bucket
+ name). When a file is written in the filesystem storage, it's always written into
+ the directory path configured on the storage and the full path of the file is stored
+ in the database. When reading a file, it's always read from the full path stored
+ in the database. So if you have two instances using the same storage with different
+ directory paths, files written in each instance will be stored in different
+ directories but be accessible from the other instance. A check is also done when
+ an attachment is removed to ensure that only files stored in the current directory
+ path are removed.
diff --git a/fs_attachment/static/description/index.html b/fs_attachment/static/description/index.html
new file mode 100644
index 0000000000..84c2990b66
--- /dev/null
+++ b/fs_attachment/static/description/index.html
@@ -0,0 +1,568 @@
+ <!-- Generated HTML rendering of README.rst (oca-gen-addon-readme output); -->
+ <!-- markup omitted here, the content mirrors the README text above.       -->
diff --git a/fs_attachment/tests/test_fs_attachment.py b/fs_attachment/tests/test_fs_attachment.py
index 9ed257d15c..4b5169f3ae 100644
--- a/fs_attachment/tests/test_fs_attachment.py
+++ b/fs_attachment/tests/test_fs_attachment.py
@@ -6,6 +6,7 @@
from unittest import mock
from odoo.tests.common import TransactionCase
+from odoo.tools import mute_logger
class TestFSAttachment(TransactionCase):
@@ -247,6 +248,42 @@ def test_fs_create_transactional(self):
self.gc_file_model._gc_files_unsafe()
self.assertEqual(os.listdir(self.temp_dir), [])
+ def test_fs_no_delete_if_not_in_current_directory_path(self):
+ """In this test we check that it's not possible to removes files
+ outside the current directory path even if they were created by the
+ current filesystem storage.
+ """
+ # normal delete
+ self.temp_backend.use_as_default_for_attachments = True
+ content = b"Transactional create"
+ attachment = self.ir_attachment_model.create(
+ {"name": "test.txt", "raw": content}
+ )
+ self.env.flush_all()
+ initial_filename = f"{self.temp_dir}/test-{attachment.id}-0.txt"
+ self.assertEqual(
+ os.listdir(self.temp_dir), [os.path.basename(initial_filename)]
+ )
+ attachment.unlink()
+ self.gc_file_model._gc_files_unsafe()
+ self.assertEqual(os.listdir(self.temp_dir), [])
+ # delete outside the current directory path
+ attachment = self.ir_attachment_model.create(
+ {"name": "test.txt", "raw": content}
+ )
+ self.env.flush_all()
+ initial_filename = f"{self.temp_dir}/test-{attachment.id}-0.txt"
+ self.assertEqual(
+ os.listdir(self.temp_dir), [os.path.basename(initial_filename)]
+ )
+ self.temp_backend.directory_path = "/dummy"
+ attachment.unlink()
+ self.gc_file_model._gc_files_unsafe()
+ # unlink is not physically done since the file is outside the current directory path
+ self.assertEqual(
+ os.listdir(self.temp_dir), [os.path.basename(initial_filename)]
+ )
+
def test_no_gc_if_disabled_on_storage(self):
store_fname = "tmp_dir://dummy-0-0.txt"
self.gc_file_model._mark_for_gc(store_fname)
@@ -310,6 +347,7 @@ def test_force_storage_to_db(self):
gc_files = self.gc_file_model.search([]).mapped("store_fname")
self.assertIn(store_fname, gc_files)
+ @mute_logger("odoo.addons.fs_attachment.models.ir_attachment")
def test_force_storage_to_fs(self):
attachment = self.ir_attachment_model.create(
{"name": "test.txt", "raw": b"content"}
diff --git a/fs_attachment/views/fs_storage.xml b/fs_attachment/views/fs_storage.xml
index 303aebdd92..0f7838a0b6 100644
--- a/fs_attachment/views/fs_storage.xml
+++ b/fs_attachment/views/fs_storage.xml
@@ -19,6 +19,10 @@
options="{'mode': 'python'}"
attrs="{'invisible': [('use_as_default_for_attachments', '=', False)]}"
/>
+
+
+
+
From 9f0bbdfa956580eefc5591816d18623d6432fceb Mon Sep 17 00:00:00 2001
From: "Laurent Mignon (ACSONE)"
Date: Mon, 17 Apr 2023 16:51:06 +0200
Subject: [PATCH 36/47] add missing files
---
requirements.txt | 2 ++
1 file changed, 2 insertions(+)
create mode 100644 requirements.txt
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000000..1d1ccedc2d
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,2 @@
+# generated from manifests external_dependencies
+python_slugify
From d01a083bac82e1592f8251d824ea35908e43804f Mon Sep 17 00:00:00 2001
From: "Laurent Mignon (ACSONE)"
Date: Mon, 17 Apr 2023 17:00:14 +0200
Subject: [PATCH 37/47] removed unused file
---
fs_attachment/__manifest__.py | 1 -
fs_attachment/data/res_config_settings_data.xml | 11 -----------
2 files changed, 12 deletions(-)
delete mode 100644 fs_attachment/data/res_config_settings_data.xml
diff --git a/fs_attachment/__manifest__.py b/fs_attachment/__manifest__.py
index 094be911ed..4ff4f5ec51 100644
--- a/fs_attachment/__manifest__.py
+++ b/fs_attachment/__manifest__.py
@@ -14,7 +14,6 @@
"data": [
"security/fs_file_gc.xml",
"views/fs_storage.xml",
- "data/res_config_settings_data.xml",
],
"external_dependencies": {"python": ["python_slugify"]},
"installable": True,
diff --git a/fs_attachment/data/res_config_settings_data.xml b/fs_attachment/data/res_config_settings_data.xml
deleted file mode 100644
index d9609103c5..0000000000
--- a/fs_attachment/data/res_config_settings_data.xml
+++ /dev/null
@@ -1,11 +0,0 @@
-
-
-
-
-
-
From fb539a652f15bf4278425783819b077ee49f6b0d Mon Sep 17 00:00:00 2001
From: "Laurent Mignon (ACSONE)"
Date: Thu, 20 Apr 2023 15:57:04 +0200
Subject: [PATCH 38/47] [IMP] fs_attachement: implements x-access
---
fs_attachment/README.rst | 42 +++++++++++++++-
fs_attachment/fs_stream.py | 40 ++++++++++++++--
fs_attachment/models/fs_storage.py | 14 ++++--
fs_attachment/readme/DESCRIPTION.rst | 7 ++-
fs_attachment/readme/USAGE.rst | 35 +++++++++++++-
fs_attachment/static/description/index.html | 48 ++++++++++++++++---
.../tests/test_fs_attachment_internal_url.py | 32 +++++++++++--
fs_attachment/views/fs_storage.xml | 2 +-
8 files changed, 195 insertions(+), 25 deletions(-)
diff --git a/fs_attachment/README.rst b/fs_attachment/README.rst
index e714b35d3b..ac593310c9 100644
--- a/fs_attachment/README.rst
+++ b/fs_attachment/README.rst
@@ -58,7 +58,12 @@ to retrieve the file content from a URL:
Particular attention has been paid to limit as much as possible the consumption
of resources necessary to serve via Odoo the content stored in an external
filesystem. The implementation is based on an end-to-end streaming of content
- between the external filesystem and the Odoo client application.
+ between the external filesystem and the Odoo client application by default.
+ Nevertheless, if your content is available via a URL on the external filesystem,
+ you can configure the storage to use the x-sendfile mechanism to serve the
+ content if it's activated on your Odoo instance. In this case, the content
+ served by Odoo at the internal URL will be proxied to the filesystem URL
+ by nginx.
**Table of contents**
@@ -144,9 +149,42 @@ Another key feature of this module is the ability to get access to the attachmen
from URLs.
* ``Base URL``: This is the base URL used to access the attachments from the
- filesystem storage itself.
+ filesystem storage itself. If your storage doesn't provide a way to access
+ the files from a URL, you can leave this field empty.
* ``Is Directory Path In URL``: Normally the directory path configured on the storage
is not included in the URL. If you want to include it, you can activate this option.
+* ``Use X-Sendfile To Serve Internal Url``: If checked and Odoo is behind a proxy
+ that supports x-sendfile, the content served by the attachment's internal URL
+ will be served by the proxy using the filesystem URL path if defined (this field
+ is available on the attachment when the storage is configured with a base URL).
+ Otherwise, the file will be served by Odoo, which will stream the content read
+ from the filesystem storage. This option is useful to avoid serving files from
+ Odoo and therefore to avoid loading the Odoo process.
+
+ To be fully functional, this option requires the proxy to support x-sendfile
+ (apache) or x-accel-redirect (nginx). You must also configure your proxy by
+ adding, for each storage, a rule to redirect the URL rooted at the 'storage code'
+ to the server serving the files. For example, if you have a storage with the
+ code 'my_storage' and a server serving the files at the URL 'http://myserver.com',
+ you must add the following rule in your proxy configuration:
+
+ .. code-block:: nginx
+
+ location /my_storage/ {
+ internal;
+ proxy_pass http://myserver.com;
+ }
+
+ With this configuration, a call to '/web/content/<attachment_id>/<file_name>'
+ for a file stored in the 'my_storage' storage will generate a response by Odoo
+ with the URI
+ ``/my_storage/<path of the file in the storage>``
+ in the headers ``X-Accel-Redirect`` and ``X-Sendfile``, and the proxy will redirect to
+ ``http://myserver.com/<path of the file in the storage>``.
+
+ see https://www.nginx.com/resources/wiki/start/topics/examples/x-accel/ for more
+ information.
+
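
Concretely, when the option is active (it also requires Odoo to run with the
standard ``x_sendfile`` option enabled, read by the code as
``config["x_sendfile"]``), the response built by Odoo carries the redirect
headers instead of the file body. A rough sketch, mirroring the
``FsStream.get_response()`` changes further down in this patch series:

.. code-block:: python

    x_accel_redirect = f"/{attachment.fs_storage_code}{attachment.fs_url_path}"
    response.headers["X-Accel-Redirect"] = x_accel_redirect  # nginx
    response.headers["X-Sendfile"] = x_accel_redirect        # apache
    response.headers["Content-Length"] = 0  # the body is served by the proxy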
Tips & Tricks
~~~~~~~~~~~~~
diff --git a/fs_attachment/fs_stream.py b/fs_attachment/fs_stream.py
index eafe4a43a2..018068587a 100644
--- a/fs_attachment/fs_stream.py
+++ b/fs_attachment/fs_stream.py
@@ -3,6 +3,7 @@
from __future__ import annotations
from odoo.http import STATIC_CACHE_LONG, Response, Stream, request
+from odoo.tools import config
from .models.ir_attachment import IrAttachment
@@ -20,14 +21,17 @@ def from_fs_attachment(cls, attachment: IrAttachment) -> FsStream:
attachment.ensure_one()
if not attachment.fs_filename:
raise ValueError("Attachment is not stored into a filesystem storage")
- fs_info = attachment.fs_storage_id.root_fs.info(attachment.fs_filename)
+ size = 0
+ if cls._check_use_x_sendfile(attachment):
+ fs_info = attachment.fs_storage_id.root_fs.info(attachment.fs_filename)
+ size = fs_info["size"]
return cls(
mimetype=attachment.mimetype,
download_name=attachment.name,
conditional=True,
etag=attachment.checksum,
type="fs",
- size=fs_info["size"],
+ size=size,
last_modified=attachment["__last_update"],
fs_attachment=attachment,
)
@@ -59,9 +63,37 @@ def get_response(self, as_attachment=None, immutable=None, **send_file_kwargs):
"response_class": Response,
**send_file_kwargs,
}
+ use_x_sendfile = self._fs_use_x_sendfile
# The file will be closed by werkzeug...
- f = self.fs_attachment.open("rb")
- res = _send_file(f, **send_file_kwargs)
+ send_file_kwargs["use_x_sendfile"] = use_x_sendfile
+ if not use_x_sendfile:
+ f = self.fs_attachment.open("rb")
+ res = _send_file(f, **send_file_kwargs)
+ else:
+ x_accel_redirect = (
+ f"/{self.fs_attachment.fs_storage_code}{self.fs_attachment.fs_url_path}"
+ )
+ send_file_kwargs["use_x_sendfile"] = True
+ res = _send_file("", **send_file_kwargs)
+ # nginx specific headers
+ res.headers["X-Accel-Redirect"] = x_accel_redirect
+ # apache specific headers
+ res.headers["X-Sendfile"] = x_accel_redirect
+ res.headers["Content-Length"] = 0
+
if immutable and res.cache_control:
res.cache_control["immutable"] = None
return res
+
+ @classmethod
+ def _check_use_x_sendfile(cls, attachment: IrAttachment) -> bool:
+ return (
+ config["x_sendfile"]
+ and attachment.fs_url
+ and attachment.fs_storage_id.use_x_sendfile_to_serve_internal_url
+ )
+
+ @property
+ def _fs_use_x_sendfile(self) -> bool:
+ """Return True if x-sendfile should be used to serve the file"""
+ return self._check_use_x_sendfile(self.fs_attachment)
diff --git a/fs_attachment/models/fs_storage.py b/fs_attachment/models/fs_storage.py
index 4848536652..ab7d5e84dc 100644
--- a/fs_attachment/models/fs_storage.py
+++ b/fs_attachment/models/fs_storage.py
@@ -38,10 +38,14 @@ class FsStorage(models.Model):
"public URL.",
)
base_url_for_files = fields.Char(compute="_compute_base_url_for_files", store=True)
- backend_view_use_internal_url = fields.Boolean(
- help="Decide if Odoo backend views should use the external URL (usually a CDN) "
- "or the internal url with direct access to the storage. "
- "This could save you some money if you pay by CDN traffic."
+ use_x_sendfile_to_serve_internal_url = fields.Boolean(
+ string="Use X-Sendfile To Serve Internal Url",
+ help="If checked and odoo is behind a proxy that supports x-sendfile, "
+ "the content served by the attachment's internal URL will be served"
+ "by the proxy using the fs_url if defined. If not, the file will be "
+ "served by odoo that will stream the content read from the filesystem "
+ "storage. This option is useful to avoid to serve files from odoo "
+ "and therefore to avoid to load the odoo process. ",
)
use_as_default_for_attachments = fields.Boolean(
help="If checked, this storage will be used to store all the attachments ",
@@ -186,7 +190,7 @@ def _get_url_for_attachment(
if not base_url:
return None
if exclude_base_url:
- base_url = base_url.replace(fs_storage.base_url, "") or "/"
+ base_url = base_url.replace(fs_storage.base_url.rstrip("/"), "") or "/"
# always remove the directory_path from the fs_file_name
# only if it's at the start of the filename
fs_filename = attachment.fs_filename
diff --git a/fs_attachment/readme/DESCRIPTION.rst b/fs_attachment/readme/DESCRIPTION.rst
index 4d981652a7..838a4074df 100644
--- a/fs_attachment/readme/DESCRIPTION.rst
+++ b/fs_attachment/readme/DESCRIPTION.rst
@@ -31,4 +31,9 @@ to retrieve the file content from a URL:
Particular attention has been paid to limit as much as possible the consumption
of resources necessary to serve via Odoo the content stored in an external
filesystem. The implementation is based on an end-to-end streaming of content
- between the external filesystem and the Odoo client application.
+ between the external filesystem and the Odoo client application by default.
+ Nevertheless, if your content is available via a URL on the external filesystem,
+ you can configure the storage to use the x-sendfile mechanism to serve the
+ content if it's activated on your Odoo instance. In this case, the content
+ served by Odoo at the internal URL will be proxied to the filesystem URL
+ by nginx.
diff --git a/fs_attachment/readme/USAGE.rst b/fs_attachment/readme/USAGE.rst
index b55dc338b0..c35feac799 100644
--- a/fs_attachment/readme/USAGE.rst
+++ b/fs_attachment/readme/USAGE.rst
@@ -74,9 +74,42 @@ Another key feature of this module is the ability to get access to the attachmen
from URLs.
* ``Base URL``: This is the base URL used to access the attachments from the
- filesystem storage itself.
+ filesystem storage itself. If your storage doesn't provide a way to access
+ the files from a URL, you can leave this field empty.
* ``Is Directory Path In URL``: Normally the directory path configured on the storage
is not included in the URL. If you want to include it, you can activate this option.
+* ``Use X-Sendfile To Serve Internal Url``: If checked and Odoo is behind a proxy
+ that supports x-sendfile, the content served by the attachment's internal URL
+ will be served by the proxy using the filesystem URL path if defined (this field
+ is available on the attachment if the storage is configured with a base URL).
+ If not, the file will be served by Odoo, which will stream the content read from
+ the filesystem storage. This option is useful to avoid serving files from Odoo
+ and therefore to reduce the load on the Odoo process.
+
+ To be fully functional, this option requires the proxy to support x-sendfile
+ (Apache) or x-accel-redirect (nginx). You must also configure your proxy by
+ adding, for each storage, a rule that redirects the URL rooted at the storage
+ code to the server serving the files. For example, if you have a storage with the
+ code 'my_storage' and a server serving the files at the URL 'http://myserver.com',
+ you must add the following rule in your proxy configuration:
+
+ .. code-block:: nginx
+
+ location /my_storage/ {
+ internal;
+ proxy_pass http://myserver.com;
+ }
+
+ With this configuration, a call to ``/web/content/<att.id>/<att.name><att.extension>``
+ for a file stored in the 'my_storage' storage will generate a response by Odoo
+ with the URI
+ ``/my_storage/<path_in_storage>/<att.name>-<att.id>-<version><att.extension>``
+ in the headers ``X-Accel-Redirect`` and ``X-Sendfile``, and the proxy will redirect to
+ ``http://myserver.com/<path_in_storage>/<att.name>-<att.id>-<version><att.extension>``.
+
+ see https://www.nginx.com/resources/wiki/start/topics/examples/x-accel/ for more
+ information.
+
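+ The option can also be enabled programmatically; the following is a minimal
+ sketch (not a definitive setup) that assumes the 'my_storage' storage from the
+ example above. Remember that Odoo itself must run with its ``x_sendfile``
+ option enabled, otherwise the content keeps being streamed by Odoo:
+
+ .. code-block:: python
+
+     storage = self.env["fs.storage"].search([("code", "=", "my_storage")])
+     # serve the internal URL through the proxy instead of streaming from Odoo
+     storage.use_x_sendfile_to_serve_internal_url = True
+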
Tips & Tricks
~~~~~~~~~~~~~
diff --git a/fs_attachment/static/description/index.html b/fs_attachment/static/description/index.html
index 84c2990b66..2d10200ccb 100644
--- a/fs_attachment/static/description/index.html
+++ b/fs_attachment/static/description/index.html
@@ -398,7 +398,12 @@ Base Attachment Object Store
Particular attention has been paid to limit as much as possible the consumption
of resources necessary to serve via Odoo the content stored in an external
filesystem. The implementation is based on an end-to-end streaming of content
-between the external filesystem and the Odoo client application.
+between the external filesystem and the Odoo client application by default.
+Nevertheless, if your content is available via a URL on the external filesystem,
+you can configure the storage to use the x-sendfile mechanism to serve the
+content if it’s activated on your Odoo instance. In this case, the content
+served by Odoo at the internal URL will be proxied to the filesystem URL
+by nginx.
Table of contents
@@ -492,11 +497,42 @@
Another key feature of this module is the ability to get access to the attachments
from URLs.
-
-- Base URL: This is the base URL used to access the attachments from the
-filesystem storage itself.
-- Is Directory Path In URL: Normally the directory patch configured on the storage
-is not included in the URL. If you want to include it, you can activate this option.
+
+Base URL: This is the base URL used to access the attachments from the
+filesystem storage itself. If your storage doesn’t provide a way to access
+the files from a URL, you can leave this field empty.
+
+Is Directory Path In URL: Normally the directory path configured on the storage
+is not included in the URL. If you want to include it, you can activate this option.
+
+Use X-Sendfile To Serve Internal Url: If checked and Odoo is behind a proxy
+that supports x-sendfile, the content served by the attachment’s internal URL
+will be served by the proxy using the filesystem URL path if defined (this field
+is available on the attachment if the storage is configured with a base URL).
+If not, the file will be served by Odoo, which will stream the content read from
+the filesystem storage. This option is useful to avoid serving files from Odoo
+and therefore to reduce the load on the Odoo process.
+To be fully functional, this option requires the proxy to support x-sendfile
+(Apache) or x-accel-redirect (nginx). You must also configure your proxy by
+adding, for each storage, a rule that redirects the URL rooted at the storage
+code to the server serving the files. For example, if you have a storage with the
+code ‘my_storage’ and a server serving the files at the URL ‘http://myserver.com’,
+you must add the following rule in your proxy configuration:
+
+location /my_storage/ {
+ internal;
+ proxy_pass http://myserver.com;
+}
+
+With this configuration, a call to ‘/web/content/<att.id>/<att.name><att.extension>’
+for a file stored in the ‘my_storage’ storage will generate a response by Odoo
+with the URI
+/my_storage/<path_in_storage>/<att.name>-<att.id>-<version><att.extension>
+in the headers X-Accel-Redirect and X-Sendfile and the proxy will redirect to
+http://myserver.com/<path_in_storage>/<att.name>-<att.id>-<version><att.extension>.
+see https://www.nginx.com/resources/wiki/start/topics/examples/x-accel/ for more
+information.
+
diff --git a/fs_attachment/tests/test_fs_attachment_internal_url.py b/fs_attachment/tests/test_fs_attachment_internal_url.py
index 61d0119e33..9c9e70eee9 100644
--- a/fs_attachment/tests/test_fs_attachment_internal_url.py
+++ b/fs_attachment/tests/test_fs_attachment_internal_url.py
@@ -3,8 +3,10 @@
import os
import shutil
import tempfile
+from unittest.mock import patch
from odoo.tests.common import HttpCase
+from odoo.tools import config
class TestFsAttachmentInternalUrl(HttpCase):
@@ -20,6 +22,7 @@ def setUpClass(cls):
"protocol": "file",
"code": "tmp_dir",
"directory_path": temp_dir,
+ "base_url": "http://my.public.files/",
}
)
cls.temp_dir = temp_dir
@@ -38,11 +41,11 @@ def setUpClass(cls):
def cleanup_tempdir():
shutil.rmtree(temp_dir)
- def tearDown(self) -> None:
- super().tearDown()
- # empty the temp dir
- for f in os.listdir(self.temp_dir):
- os.remove(os.path.join(self.temp_dir, f))
+ @classmethod
+ def tearDownClass(cls):
+ super().tearDownClass()
+ for f in os.listdir(cls.temp_dir):
+ os.remove(os.path.join(cls.temp_dir, f))
def assertDownload(
self, url, headers, assert_status_code, assert_headers, assert_content=None
@@ -72,3 +75,22 @@ def test_fs_attachment_internal_url(self):
},
assert_content=self.content,
)
+
+ def test_fs_attachment_internal_url_x_sendfile(self):
+ self.authenticate("admin", "admin")
+ self.temp_backend.write({"use_x_sendfile_to_serve_internal_url": True})
+ with patch.object(config, "options", {**config.options, "x_sendfile": True}):
+ x_accel_redirect = f"/tmp_dir/test-{self.attachment.id}-0.txt"
+ self.assertDownload(
+ self.attachment.internal_url,
+ headers={},
+ assert_status_code=200,
+ assert_headers={
+ "Content-Type": "text/plain; charset=utf-8",
+ "Content-Disposition": "inline; filename=test.txt",
+ "X-Accel-Redirect": x_accel_redirect,
+ "Content-Length": "0",
+ "X-Sendfile": x_accel_redirect,
+ },
+ assert_content=None,
+ )
diff --git a/fs_attachment/views/fs_storage.xml b/fs_attachment/views/fs_storage.xml
index 0f7838a0b6..51c6973109 100644
--- a/fs_attachment/views/fs_storage.xml
+++ b/fs_attachment/views/fs_storage.xml
@@ -22,7 +22,7 @@
-
+
From edf1125c42034fa198d0abe264bab3291805756c Mon Sep 17 00:00:00 2001
From: "Laurent Mignon (ACSONE)"
Date: Thu, 20 Apr 2023 16:28:24 +0200
Subject: [PATCH 39/47] [IMP] fs_attachment: implements filename obfuscation
---
fs_attachment/models/fs_storage.py | 16 ++++++++++++++++
fs_attachment/models/ir_attachment.py | 3 +++
fs_attachment/readme/USAGE.rst | 7 +++++++
fs_attachment/tests/test_fs_attachment.py | 15 ++++++++++++++-
.../tests/test_fs_attachment_internal_url.py | 1 -
fs_attachment/views/fs_storage.xml | 1 +
6 files changed, 41 insertions(+), 2 deletions(-)
diff --git a/fs_attachment/models/fs_storage.py b/fs_attachment/models/fs_storage.py
index ab7d5e84dc..b7951cb107 100644
--- a/fs_attachment/models/fs_storage.py
+++ b/fs_attachment/models/fs_storage.py
@@ -67,6 +67,15 @@ class FsStorage(models.Model):
"* text/css are stored in database whatever their size",
default=lambda self: self._default_force_db_for_default_attachment_rules,
)
+ use_filename_obfuscation = fields.Boolean(
+ help="If checked, the filename will be obfuscated. This option is "
+ "useful to avoid to expose sensitive information trough the URL "
+ "or in the remote storage. The obfuscation is done using a hash "
+ "of the filename. The original filename is stored in the attachment "
+ "metadata. The obfusation is to avoid if the storage is used to store "
+ "files that are referenced by other systems (like a website) where "
+ "the filename is important for SEO.",
+ )
_sql_constraints = [
(
@@ -163,6 +172,13 @@ def _must_optimize_directory_path(self, code):
def _must_autovacuum_gc(self, code):
return bool(self.search([("code", "=", code), ("autovacuum_gc", "=", True)]))
+ @api.model
+ @tools.ormcache("code")
+ def _must_use_filename_obfuscation(self, code):
+ return bool(
+ self.search([("code", "=", code), ("use_filename_obfuscation", "=", True)])
+ )
+
@api.depends("base_url", "is_directory_path_in_url")
def _compute_base_url_for_files(self):
for rec in self:
diff --git a/fs_attachment/models/ir_attachment.py b/fs_attachment/models/ir_attachment.py
index 6fcc4dc6f8..65834145b2 100644
--- a/fs_attachment/models/ir_attachment.py
+++ b/fs_attachment/models/ir_attachment.py
@@ -399,6 +399,9 @@ def _enforce_meaningful_storage_filename(self) -> None:
if not self._is_file_from_a_storage(attachment.store_fname):
continue
fs, storage, filename = self._fs_parse_store_fname(attachment.store_fname)
+ if self.env["fs.storage"]._must_use_filename_obfuscation(storage):
+ attachment.fs_filename = fs.info(filename)["name"]
+ continue
if self._is_fs_filename_meaningful(filename):
continue
new_filename = attachment._build_fs_filename()
diff --git a/fs_attachment/readme/USAGE.rst b/fs_attachment/readme/USAGE.rst
index c35feac799..e94b6c719c 100644
--- a/fs_attachment/readme/USAGE.rst
+++ b/fs_attachment/readme/USAGE.rst
@@ -110,6 +110,13 @@ from URLs.
see https://www.nginx.com/resources/wiki/start/topics/examples/x-accel/ for more
information.
+* ``Use Filename Obfuscation``: If checked, the filename used to store the content
+ into the filesystem storage will be obfuscated. This is useful to avoid exposing
+ the real filename of the attachments outside of the Odoo database.
+ The filename will be obfuscated by using the checksum of the content. Avoid this
+ option when the content of your filestore is shared with other systems
+ (like your website) and you want to keep a meaningful filename to ensure good
+ SEO. This option is disabled by default.
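+
+ The effect can be checked with a minimal sketch (it assumes a filesystem storage
+ configured as default for attachments with this option enabled, as exercised in
+ the tests of this module):
+
+ .. code-block:: python
+
+     attachment = self.env["ir.attachment"].create(
+         {"name": "test.txt", "raw": b"content"}
+     )
+     # the name written to the storage is the content checksum,
+     # not the human readable attachment name
+     assert attachment.store_fname.split("/")[-1] == attachment.checksum
+     assert attachment.name == "test.txt"
+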
Tips & Tricks
~~~~~~~~~~~~~
diff --git a/fs_attachment/tests/test_fs_attachment.py b/fs_attachment/tests/test_fs_attachment.py
index 4b5169f3ae..f2963782ce 100644
--- a/fs_attachment/tests/test_fs_attachment.py
+++ b/fs_attachment/tests/test_fs_attachment.py
@@ -14,7 +14,6 @@ class TestFSAttachment(TransactionCase):
def setUpClass(cls):
super().setUpClass()
cls.env = cls.env(context=dict(cls.env.context, tracking_disable=True))
- cls.backend = cls.env.ref("fs_storage.default_fs_storage")
temp_dir = tempfile.mkdtemp()
cls.temp_backend = cls.env["fs.storage"].create(
{
@@ -370,6 +369,20 @@ def test_force_storage_to_fs(self):
)
self.assertIn(filename, os.listdir(self.temp_dir))
+ def test_storage_use_filename_obfuscation(self):
+ self.temp_backend.base_url = "https://acsone.eu/media"
+ self.temp_backend.use_as_default_for_attachments = True
+ self.temp_backend.use_filename_obfuscation = True
+ attachment = self.ir_attachment_model.create(
+ {"name": "test.txt", "raw": b"content"}
+ )
+ self.env.flush_all()
+ self.assertTrue(attachment.store_fname)
+ self.assertEqual(attachment.name, "test.txt")
+ self.assertEqual(attachment.checksum, attachment.store_fname.split("/")[-1])
+ self.assertEqual(attachment.checksum, attachment.fs_url.split("/")[-1])
+ self.assertEqual(attachment.mimetype, "text/plain")
+
class MyException(Exception):
"""Exception to be raised into tests ensure that we trap only this
diff --git a/fs_attachment/tests/test_fs_attachment_internal_url.py b/fs_attachment/tests/test_fs_attachment_internal_url.py
index 9c9e70eee9..e805a87008 100644
--- a/fs_attachment/tests/test_fs_attachment_internal_url.py
+++ b/fs_attachment/tests/test_fs_attachment_internal_url.py
@@ -14,7 +14,6 @@ class TestFsAttachmentInternalUrl(HttpCase):
def setUpClass(cls):
super().setUpClass()
cls.env = cls.env(context=dict(cls.env.context, tracking_disable=True))
- cls.backend = cls.env.ref("fs_storage.default_fs_storage")
temp_dir = tempfile.mkdtemp()
cls.temp_backend = cls.env["fs.storage"].create(
{
diff --git a/fs_attachment/views/fs_storage.xml b/fs_attachment/views/fs_storage.xml
index 51c6973109..8754440670 100644
--- a/fs_attachment/views/fs_storage.xml
+++ b/fs_attachment/views/fs_storage.xml
@@ -22,6 +22,7 @@
+
From 9dba9c6d1d2d1904bc974a818988e1d52162b8a1 Mon Sep 17 00:00:00 2001
From: "Laurent Mignon (ACSONE)"
Date: Thu, 27 Apr 2023 08:12:12 +0200
Subject: [PATCH 40/47] [IMP] fs_attachment: Declares maintainer
Also fix a typo in the summary
---
fs_attachment/__manifest__.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/fs_attachment/__manifest__.py b/fs_attachment/__manifest__.py
index 4ff4f5ec51..d19737f9c2 100644
--- a/fs_attachment/__manifest__.py
+++ b/fs_attachment/__manifest__.py
@@ -4,7 +4,7 @@
{
"name": "Base Attachment Object Store",
- "summary": "Stora attachments on external object store",
+ "summary": "Store attachments on external object store",
"version": "16.0.1.0.0",
"author": "Camptocamp, ACSONE SA/NV, Odoo Community Association (OCA)",
"license": "AGPL-3",
@@ -18,4 +18,5 @@
"external_dependencies": {"python": ["python_slugify"]},
"installable": True,
"auto_install": False,
+ "maintainers": ["lmignon"],
}
From 4c8ea4169fa771663517f19e796f36dcb3bae393 Mon Sep 17 00:00:00 2001
From: "Laurent Mignon (ACSONE)"
Date: Thu, 27 Apr 2023 08:12:46 +0200
Subject: [PATCH 41/47] [FIX] fs_attachment: Do nothing in write if nothing to
write
---
fs_attachment/models/ir_attachment.py | 2 ++
1 file changed, 2 insertions(+)
diff --git a/fs_attachment/models/ir_attachment.py b/fs_attachment/models/ir_attachment.py
index 65834145b2..3c7fab1b21 100644
--- a/fs_attachment/models/ir_attachment.py
+++ b/fs_attachment/models/ir_attachment.py
@@ -251,6 +251,8 @@ def create(self, vals_list):
return attachments
def write(self, vals):
+ if not self:
+ return self
if ("datas" in vals or "raw" in vals) and not (
"name" in vals or "mimetype" in vals
):
From 9330aeba099cff5202b0ef75c8677399bb8fe5c2 Mon Sep 17 00:00:00 2001
From: "Laurent Mignon (ACSONE)"
Date: Wed, 24 May 2023 11:19:34 +0200
Subject: [PATCH 42/47] [IMP] fs_attachment: Set development status to 'Beta'
---
fs_attachment/__manifest__.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/fs_attachment/__manifest__.py b/fs_attachment/__manifest__.py
index d19737f9c2..62affae50e 100644
--- a/fs_attachment/__manifest__.py
+++ b/fs_attachment/__manifest__.py
@@ -8,6 +8,7 @@
"version": "16.0.1.0.0",
"author": "Camptocamp, ACSONE SA/NV, Odoo Community Association (OCA)",
"license": "AGPL-3",
+ "development_status": "Beta",
"category": "Knowledge Management",
"depends": ["fs_storage"],
"website": "https://github.com/OCA/storage",
From 42be0b9109aa6b177f741404b080ac82a95d0c78 Mon Sep 17 00:00:00 2001
From: "Laurent Mignon (ACSONE)"
Date: Sun, 4 Jun 2023 22:11:02 +0200
Subject: [PATCH 43/47] [IMP] fs_attachment: Add full support for file like
open method
---
fs_attachment/README.rst | 70 +++-
fs_attachment/models/ir_attachment.py | 361 ++++++++++++++++--
fs_attachment/readme/DESCRIPTION.rst | 8 +-
fs_attachment/readme/USAGE.rst | 51 ++-
fs_attachment/static/description/index.html | 80 +++-
fs_attachment/tests/__init__.py | 1 +
fs_attachment/tests/common.py | 41 ++
fs_attachment/tests/test_fs_attachment.py | 53 +--
.../test_fs_attachment_file_like_adapter.py | 150 ++++++++
9 files changed, 713 insertions(+), 102 deletions(-)
create mode 100644 fs_attachment/tests/common.py
create mode 100644 fs_attachment/tests/test_fs_attachment_file_like_adapter.py
diff --git a/fs_attachment/README.rst b/fs_attachment/README.rst
index ac593310c9..3a27b3e165 100644
--- a/fs_attachment/README.rst
+++ b/fs_attachment/README.rst
@@ -43,7 +43,7 @@ into the filestore is the file content checksum). Concretely the filename
is based on the pattern:
'<name-without-extension>-<attachment-id>-<version>.<extension>'
-Last but not least, this addon adds on the attachments 2 new fields to use
+This addon also adds 2 new fields on the attachments, used
to retrieve the file content from a URL:
* ``Internal URL``: URL to retrieve the file content from the Odoo's
@@ -65,6 +65,12 @@ to retrieve the file content from a URL:
served by Odoo at the internal URL will be proxied to the filesystem URL
by nginx.
+Last but not least, the addon adds a new method `open` on the attachment. This
+method allows you to open the attachment as a file. For attachments stored in
+the filestore or in an external filesystem, it allows you to directly read from
+and write to the file and therefore minimizes memory consumption, since the data
+is not kept in memory before being written to the storage.
+
**Table of contents**
.. contents::
@@ -185,6 +191,60 @@ from URLs.
see https://www.nginx.com/resources/wiki/start/topics/examples/x-accel/ for more
information.
+* ``Use Filename Obfuscation``: If checked, the filename used to store the content
+ into the filesystem storage will be obfuscated. This is useful to avoid exposing
+ the real filename of the attachments outside of the Odoo database.
+ The filename will be obfuscated by using the checksum of the content. Avoid this
+ option when the content of your filestore is shared with other systems
+ (like your website) and you want to keep a meaningful filename to ensure good
+ SEO. This option is disabled by default.
+
+
+Advanced usage: Using attachment as a file
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The `open` method on the attachment can be used to open and manipulate the attachment
+as a file object. The object returned by the call to the method implements
+methods from ``io.IOBase``. The method can be called like any other Python method.
+In such a case, it's your responsibility to close the file at the end of your
+process.
+
+.. code-block:: python
+
+ attachment = self.env["ir.attachment"].create({"name": "test.txt"})
+ the_file = attachment.open("wb")
+ try:
+ the_file.write(b"content")
+ finally:
+ the_file.close()
+
+The result of the call to `open` also works in a context ``with`` block. In such
+a case, when the code exits the block, the file is automatically closed.
+
+.. code-block:: python
+
+ attachment = self.env["ir.attachment"].create({"name": "test.txt"})
+ with attachment.open("wb") as the_file:
+ the_file.write(b"content")
+
+It's always safer to prefer the second approach.
+
+When your attachment is stored in the Odoo filestore or in an external
+filesystem storage, a new file is created each time you call the open method.
+This ensures that if the transaction is rolled back, the original content
+is preserved. Nevertheless, you could have use cases where you would like to write
+to the existing file directly. For example, you could create an empty attachment
+to store a CSV report and then use the `open` method to write your content directly
+into the new file. To support this kind of use case, the parameter `new_version`
+can be passed as `False` to avoid the creation of a new file.
+
+.. code-block:: python
+
+ attachment = self.env["ir.attachment"].create({"name": "test.txt"})
+ with attachment.open("w", new_version=False) as f:
+ writer = csv.writer(f, delimiter=";")
+ ....
+
Tips & Tricks
~~~~~~~~~~~~~
@@ -253,6 +313,14 @@ OCA, or the Odoo Community Association, is a nonprofit organization whose
mission is to support the collaborative development of Odoo features and
promote its widespread use.
+.. |maintainer-lmignon| image:: https://github.com/lmignon.png?size=40px
+ :target: https://github.com/lmignon
+ :alt: lmignon
+
+Current `maintainer `__:
+
+|maintainer-lmignon|
+
This module is part of the `OCA/storage `_ project on GitHub.
You are welcome to contribute. To learn how please visit https://odoo-community.org/page/Contribute.
diff --git a/fs_attachment/models/ir_attachment.py b/fs_attachment/models/ir_attachment.py
index 3c7fab1b21..4d7d67a246 100644
--- a/fs_attachment/models/ir_attachment.py
+++ b/fs_attachment/models/ir_attachment.py
@@ -415,11 +415,18 @@ def _enforce_meaningful_storage_filename(self) -> None:
# we need to update the store_fname with the new filename by
# calling the write method of the field since the write method
# of ir_attachment prevent normal write on store_fname
- attachment._fields["store_fname"].write(
- attachment, f"{storage}://{new_filename}"
- )
+ attachment._force_write_store_fname(f"{storage}://{new_filename}")
self._fs_mark_for_gc(attachment.store_fname)
+ def _force_write_store_fname(self, store_fname):
+ """Force the write of the store_fname field
+
+ The base implementation of the store_fname field prevent the write
+ of the store_fname field. This method bypass this limitation by
+ calling the write method of the field directly.
+ """
+ self._fields["store_fname"].write(self, store_fname)
+
@api.model
def _get_fs_storage_for_code(
self, code: str, root: bool = False
@@ -514,19 +521,26 @@ def _fs_mark_for_gc(self, fname):
self.env["fs.file.gc"]._mark_for_gc(fname)
def open(
- self, mode="rb", block_size=None, cache_options=None, compression=None, **kwargs
+ self,
+ mode="rb",
+ block_size=None,
+ cache_options=None,
+ compression=None,
+ new_version=True,
+ **kwargs,
) -> io.IOBase:
"""
Return a file-like object from the filesystem storage where the attachment
content is stored.
- This method works for all attachments, even if the content is stored in the
- database or into the odoo filestore. (parameters are ignored in the case
- of the database storage).
+ In read mode, this method works for all attachments, even if the content
+ is stored in the database, in the Odoo filestore or in a filesystem storage.
The resultant instance must function correctly in a context ``with``
block.
+ (parameters are ignored in the case of the database storage).
+
Parameters
----------
path: str
@@ -541,46 +555,30 @@ def open(
If given, open file using compression codec. Can either be a compression
name (a key in ``fsspec.compression.compr``) or "infer" to guess the
compression from the filename suffix.
+ new_version: bool
+ If True, and mode is 'w', create a new version of the file.
+ If False, and mode is 'w', overwrite the current version of the file.
+ This flag is True by default to avoid data loss and ensure transaction
+ mechanism between Odoo and the filesystem storage.
encoding, errors, newline: passed on to TextIOWrapper for text mode
Returns
-------
A file-like object
- Caution: modifications to the file-like object are not transactional.
- If you modify the file-like object and the current transaction is rolled
- back, the changes will be saved to the file and not rolled back.
- Moreover mofication to the content will not be reflected into the cache
- and could lead to data mismatch when the data will be flush
-
TODO if open with 'w' in mode, we could use a buffered IO detecting that
the content is modified and invalidating the attachment cache...
"""
self.ensure_one()
- if self._is_file_from_a_storage(self.store_fname):
- fs, _storage, fname = self._fs_parse_store_fname(
- self.store_fname, root=True
- )
- return fs.open(
- fname,
- mode=mode,
- block_size=block_size,
- cache_options=cache_options,
- compression=compression,
- **kwargs,
- )
- if self.store_fname:
- return fsspec.filesystem("file").open(
- self._full_path(self.store_fname),
- mode=mode,
- block_size=block_size,
- cache_options=cache_options,
- compression=compression,
- **kwargs,
- )
- if "w" in mode:
- raise SystemError("Write mode is not supported for data read from database")
- return io.BytesIO(self.db_datas)
+ return AttachmentFileLikeAdapter(
+ self,
+ mode=mode,
+ block_size=block_size,
+ cache_options=cache_options,
+ compression=compression,
+ new_version=new_version,
+ **kwargs,
+ )
@contextmanager
def _do_in_new_env(self, new_cr=False):
@@ -782,3 +780,292 @@ def _force_storage_to_object_storage(self, new_cr=False):
if files_to_clean:
new_env.cr.commit()
clean_fs(files_to_clean)
+
+
+class AttachmentFileLikeAdapter(object):
+ """
+ This class is a wrapper class around the ir.attachment model. It is used to
+ open the ir.attachment as a file and to read/write data to it.
+
+ When the content of the file is stored into the odoo filestore or in a
+ filesystem storage, this object allows you to read/write the content from
+ the file in a direct way without having to read/write the whole file into
+ memory. When the content of the file is stored into database, this content
+ is read/written from/into a buffer in memory.
+
+ Parameters
+ ----------
+ attachment : ir.attachment
+ The attachment to open as a file.
+ mode: str like 'rb', 'w'
+ See builtin ``open()``
+ block_size: int
+ Some indication of buffering - this is a value in bytes
+ cache_options : dict, optional
+ Extra arguments to pass through to the cache.
+ compression: string or None
+ If given, open file using compression codec. Can either be a compression
+ name (a key in ``fsspec.compression.compr``) or "infer" to guess the
+ compression from the filename suffix.
+ new_version: bool
+ If True, and mode is 'w', create a new version of the file.
+ If False, and mode is 'w', overwrite the current version of the file.
+ When the adapter is created through ``IrAttachment.open()``, this flag
+ defaults to True to avoid data loss and preserve the transactional
+ behavior between Odoo and the filesystem storage.
+ encoding, errors, newline: passed on to TextIOWrapper for text mode
+
+ You can use this class to adapt an attachment object as a file in 2 ways:
+ * as a context manager wrapping the attachment object as a file
+ * or as a normal utility class
+
+ Examples
+
+ >>> with AttachmentFileLikeAdapter(attachment, mode="rb") as f:
+ ... f.read()
+ b'Hello World'
+ # at the end of the context manager, the file is closed
+ >>> f = AttachmentFileLikeAdapter(attachment, mode="rb")
+ >>> f.read()
+ b'Hello World'
+ # you have to close the file manually
+ >>> f.close()
+
+ """
+
+ def __init__(
+ self,
+ attachment: IrAttachment,
+ mode: str = "rb",
+ block_size: int | None = None,
+ cache_options: dict | None = None,
+ compression: str | None = None,
+ new_version: bool = False,
+ **kwargs,
+ ):
+ self._attachment = attachment
+ self._mode = mode
+ self._block_size = block_size
+ self._cache_options = cache_options
+ self._compression = compression
+ self._new_version = new_version
+ self._kwargs = kwargs
+
+ # state attributes
+ self._file: io.IOBase | None = None
+ self._filesystem: fsspec.AbstractFileSystem | None = None
+ self._new_store_fname: str | None = None
+
+ @property
+ def attachment(self) -> IrAttachment:
+ """The attachment object the file is related to"""
+ return self._attachment
+
+ @property
+ def mode(self) -> str:
+ """The mode used to open the file"""
+ return self._mode
+
+ @property
+ def block_size(self) -> int | None:
+ """The block size used to open the file"""
+ return self._block_size
+
+ @property
+ def cache_options(self) -> dict | None:
+ """The cache options used to open the file"""
+ return self._cache_options
+
+ @property
+ def compression(self) -> str | None:
+ """The compression used to open the file"""
+ return self._compression
+
+ @property
+ def new_version(self) -> bool:
+ """Is the file open for a new version"""
+ return self._new_version
+
+ @property
+ def kwargs(self) -> dict:
+ """The kwargs passed when opening the file on the"""
+ return self._kwargs
+
+ @property
+ def _is_open_for_modify(self) -> bool:
+ """Is the file open for modification
+ A file is open for modification if it is open for writing or appending
+ """
+ return "w" in self.mode or "a" in self.mode
+
+ @property
+ def _is_open_for_read(self) -> bool:
+ """Is the file open for reading"""
+ return "r" in self.mode
+
+ @property
+ def _is_stored_in_db(self) -> bool:
+ """Is the file stored in database"""
+ return self.attachment._storage() == "db"
+
+ def __enter__(self) -> io.IOBase:
+ """Called when entering the context manager
+
+ Create the file object and return it.
+ """
+ # we call the attachment instance to get the file object
+ self._file_open()
+ return self._file
+
+ def _file_open(self) -> io.IOBase:
+ """Open the attachment content as a file-like object
+
+ This method will initialize the following attributes:
+
+ * _file: the file-like object.
+ * _filesystem: filesystem object.
+ * _new_store_fname: the new store_fname if the file is
+ opened for a new version.
+ """
+ new_store_fname = None
+ if (
+ self._is_open_for_read
+ or (self._is_open_for_modify and not self.new_version)
+ or self._is_stored_in_db
+ ):
+ if self.attachment._is_file_from_a_storage(self.attachment.store_fname):
+ fs, _storage, fname = self.attachment._fs_parse_store_fname(
+ self.attachment.store_fname, root=True
+ )
+ filepath = fname
+ filesystem = fs
+ elif self.attachment.store_fname:
+ filepath = self.attachment._full_path(self.attachment.store_fname)
+ filesystem = fsspec.filesystem("file")
+ else:
+ filepath = f"{self.attachment.id}"
+ filesystem = fsspec.filesystem("memory")
+ if "a" in self.mode or self._is_open_for_read:
+ filesystem.pipe_file(filepath, self.attachment.db_datas)
+ the_file = filesystem.open(
+ filepath,
+ mode=self.mode,
+ block_size=self.block_size,
+ cache_options=self.cache_options,
+ compression=self.compression,
+ **self.kwargs,
+ )
+ else:
+ # mode='w' and new_version=True and storage != 'db'
+ # We must create a new file with a new name. If we are in an
+ # append mode, we must copy the content of the old file (or create
+ # the new one by copy of the old one).
+ # to not break the storage plugin mechanism, we'll use the
+ # _file_write method to create the new empty file with a random
+ # content and checksum to avoid collision.
+ content = self._gen_random_content()
+ checksum = self.attachment._compute_checksum(content)
+ new_store_fname = self.attachment._file_write(content, checksum)
+ if self.attachment._is_file_from_a_storage(new_store_fname):
+ # the new store_fname is a path from the specified storage
+ # the store_fname stored on the attachment is expressed from the
+ # root filesystem. This is done on purpose to always read
+ # from the root filesystem and write to the specialized one
+ fs, _storage, fname = self.attachment._fs_parse_store_fname(
+ new_store_fname, root=False
+ )
+ new_filepath = fs.info(fname)["name"]
+ root_fs, _storage, old_filepath = self.attachment._fs_parse_store_fname(
+ self.attachment.store_fname, root=True
+ )
+ filesystem = root_fs
+ else:
+ new_filepath = self.attachment._full_path(new_store_fname)
+ old_filepath = self.attachment._full_path(self.attachment.store_fname)
+ filesystem = fsspec.filesystem("file")
+ if "a" in self.mode:
+ filesystem.cp_file(old_filepath, new_filepath)
+ the_file = filesystem.open(
+ new_filepath,
+ mode=self.mode,
+ block_size=self.block_size,
+ cache_options=self.cache_options,
+ compression=self.compression,
+ **self.kwargs,
+ )
+ self._filesystem = filesystem
+ self._new_store_fname = new_store_fname
+ self._file = the_file
+
+ def _gen_random_content(self, size=256):
+ """Generate a random content of size bytes"""
+ return os.urandom(size)
+
+ def _file_close(self):
+ """Close the file-like object opened by _file_open"""
+ if not self._file:
+ return
+ if not self._file.closed:
+ self._file.flush()
+ self._file.close()
+ if self._is_open_for_modify:
+ attachment_data = self._get_attachment_data()
+ if (
+ not (self.new_version and self._new_store_fname)
+ and self._is_stored_in_db
+ ):
+ attachment_data["raw"] = self._file.getvalue()
+ self.attachment.write(attachment_data)
+ if self.new_version and self._new_store_fname:
+ self.attachment._force_write_store_fname(self._new_store_fname)
+ self.attachment._enforce_meaningful_storage_filename()
+ self._ensure_cache_consistency()
+
+ def _get_attachment_data(self) -> dict:
+ ret = {}
+ if self._file:
+ ret["checksum"] = self._filesystem.checksum(self._file.path)
+ ret["file_size"] = self._filesystem.size(self._file.path)
+ # TODO index_content is too expensive to compute here or should be configurable
+ # data = self._file.read()
+ # ret["index_content"] = self.attachment._index_content(data,
+ # self.attachment.mimetype, ret["checksum"])
+ ret["index_content"] = b""
+
+ return ret
+
+ def _ensure_cache_consistency(self):
+ """Ensure the cache consistency once the file is closed"""
+ if self._is_open_for_modify and not self._is_stored_in_db:
+ self.attachment.invalidate_recordset(fnames=["raw", "datas", "db_datas"])
+ if (
+ self.attachment.res_model
+ and self.attachment.res_id
+ and self.attachment.res_field
+ ):
+ self.attachment.env[self.attachment.res_model].browse(
+ self.attachment.res_id
+ ).invalidate_recordset(fnames=[self.attachment.res_field])
+
+ def __exit__(self, *args):
+ """Called when exiting the context manager.
+
+ Close the file if it is not already closed.
+ """
+ self._file_close()
+
+ def __getattr__(self, attr):
+ """
+ Forward all other attributes to the underlying file object.
+
+ This method is required to make the object behave like a file object
+ when the AttachmentFileLikeAdapter is used outside a context manager.
+
+ .. code-block:: python
+
+ f = AttachmentFileLikeAdapter(attachment)
+ f.read()
+
+ """
+ if not self._file:
+ self.__enter__()
+ return getattr(self._file, attr)
diff --git a/fs_attachment/readme/DESCRIPTION.rst b/fs_attachment/readme/DESCRIPTION.rst
index 838a4074df..f0ca6ff482 100644
--- a/fs_attachment/readme/DESCRIPTION.rst
+++ b/fs_attachment/readme/DESCRIPTION.rst
@@ -16,7 +16,7 @@ into the filestore is the file content checksum). Concretely the filename
is based on the pattern:
'<name-without-extension>-<attachment-id>-<version>.<extension>'
-Last but not least, this addon adds on the attachments 2 new fields to use
+This addon also adds 2 new fields on the attachments, used
to retrieve the file content from a URL:
* ``Internal URL``: URL to retrieve the file content from the Odoo's
@@ -37,3 +37,9 @@ to retrieve the file content from a URL:
content if it's activated on your Odoo instance. In this case, the content
served by Odoo at the internal URL will be proxied to the filesystem URL
by nginx.
+
+Last but not least, the addon adds a new method `open` on the attachment. This
+method allows you to open the attachment as a file. For attachments stored in
+the filestore or in an external filesystem, it allows you to directly read from
+and write to the file and therefore minimizes memory consumption, since the data
+is not kept in memory before being written to the storage.
diff --git a/fs_attachment/readme/USAGE.rst b/fs_attachment/readme/USAGE.rst
index e94b6c719c..2bd95e3dc7 100644
--- a/fs_attachment/readme/USAGE.rst
+++ b/fs_attachment/readme/USAGE.rst
@@ -115,8 +115,55 @@ from URLs.
the real filename of the attachments outside of the Odoo database.
The filename will be obfuscated by using the checksum of the content. Avoid this
option when the content of your filestore is shared with other systems
- (like your website) and you want to keep a meaningful filename to ensure good
- SEO. This option is disabled by default.
+ (like your website) and you want to keep a meaningful filename to ensure good
+ SEO. This option is disabled by default.
+
+
+Advanced usage: Using attachment as a file
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The `open` method on the attachment can be used to open and manipulate the attachment
+as a file object. The object returned by the call to the method implements
+methods from ``io.IOBase``. The method can be called like any other Python method.
+In such a case, it's your responsibility to close the file at the end of your
+process.
+
+.. code-block:: python
+
+ attachment = self.env["ir.attachment"].create({"name": "test.txt"})
+ the_file = attachment.open("wb")
+ try:
+ the_file.write(b"content")
+ finally:
+ the_file.close()
+
+The result of the call to `open` also works in a context ``with`` block. In such
+a case, when the code exits the block, the file is automatically closed.
+
+.. code-block:: python
+
+ attachment = self.env["ir.attachment"].create({"name": "test.txt"})
+ with attachment.open("wb") as the_file:
+ the_file.write(b"content")
+
+It's always safer to prefer the second approach.
+
+When your attachment is stored in the Odoo filestore or in an external
+filesystem storage, a new file is created each time you call the open method.
+This ensures that if the transaction is rolled back, the original content
+is preserved. Nevertheless, you could have use cases where you would like to write
+to the existing file directly. For example, you could create an empty attachment
+to store a CSV report and then use the `open` method to write your content directly
+into the new file. To support this kind of use case, the parameter `new_version`
+can be passed as `False` to avoid the creation of a new file.
+
+.. code-block:: python
+
+ attachment = self.env["ir.attachment"].create({"name": "test.txt"})
+ with attachment.open("w", new_version=False) as f:
+ writer = csv.writer(f, delimiter=";")
+ ....
+
Tips & Tricks
~~~~~~~~~~~~~
diff --git a/fs_attachment/static/description/index.html b/fs_attachment/static/description/index.html
index 2d10200ccb..bf82a6988f 100644
--- a/fs_attachment/static/description/index.html
+++ b/fs_attachment/static/description/index.html
@@ -383,7 +383,7 @@ Base Attachment Object Store
into the filestore is the file content checksum). Concretely the filename
is based on the pattern:
‘<name-without-extension>-<attachment-id>-<version>.<extension>’
-Last but not least, this addon adds on the attachments 2 new fields to use
+
This addon also adds 2 new fields on the attachments, used
to retrieve the file content from a URL:
- Internal URL: URL to retrieve the file content from the Odoo’s
@@ -405,19 +405,25 @@
Base Attachment Object Store
served by Odoo at the internal URL will be proxied to the filesystem URL
by nginx.
+Last but not least, the addon adds a new method open on the attachment. This
+method allows you to open the attachment as a file. For attachments stored in
+the filestore or in an external filesystem, it allows you to directly read from
+and write to the file and therefore minimizes memory consumption, since the data
+is not kept in memory before being written to the storage.
Table of contents
- Usage
-- Bug Tracker
-- Credits
@@ -533,10 +539,56 @@
see https://www.nginx.com/resources/wiki/start/topics/examples/x-accel/ for more
information.
+Use Filename Obfuscation: If checked, the filename used to store the content
+into the filesystem storage will be obfuscated. This is useful to avoid exposing
+the real filename of the attachments outside of the Odoo database.
+The filename will be obfuscated by using the checksum of the content. Avoid this
+option when the content of your filestore is shared with other systems
+(like your website) and you want to keep a meaningful filename to ensure good
+SEO. This option is disabled by default.
+
+
+
+
The open method on the attachment can be used to open and manipulate the attachment
+as a file object. The object returned by the call to the method implements
+methods from io.IOBase. The method can be called like any other Python method.
+In such a case, it’s your responsibility to close the file at the end of your
+process.
+
+attachment = self.env["ir.attachment"].create({"name": "test.txt"})
+the_file = attachment.open("wb")
+try:
+ the_file.write(b"content")
+finally:
+ the_file.close()
+
+
The result of the call to open also works in a context with block. In such
+a case, when the code exits the block, the file is automatically closed.
+
+attachment = self.env["ir.attachment"].create({"name": "test.txt"})
+with attachment.open("wb") as the_file:
+ the_file.write(b"content")
+
+
It’s always safer to prefer the second approach.
+
When your attachment is stored in the Odoo filestore or in an external
+filesystem storage, a new file is created each time you call the open method.
+This ensures that if the transaction is rolled back, the original content
+is preserved. Nevertheless, you could have use cases where you would like to write
+to the existing file directly. For example, you could create an empty attachment
+to store a CSV report and then use the open method to write your content directly
+into the new file. To support this kind of use case, the parameter new_version
+can be passed as False to avoid the creation of a new file.
+
+attachment = self.env["ir.attachment"].create({"name": "test.txt"})
+with attachment.open("w", new_version=False) as f:
+ writer = csv.writer(f, delimiter=";")
+ ....
+
+
-
+
When working in multi staging environments, the management of the attachments
can be tricky. For example, if you have a production instance and a staging
@@ -558,7 +610,7 @@
-
+
Bugs are tracked on GitHub Issues.
In case of trouble, please check there if your issue has already been reported.
If you spotted it first, help us smashing it by providing a detailed and welcomed
@@ -566,16 +618,16 @@
Do not contact contributors directly about support or help with technical issues.
-
+
-
+
This module is maintained by the OCA.
OCA, or the Odoo Community Association, is a nonprofit organization whose
mission is to support the collaborative development of Odoo features and
promote its widespread use.
+
Current maintainer:
+

This module is part of the OCA/storage project on GitHub.
You are welcome to contribute. To learn how please visit https://odoo-community.org/page/Contribute.
diff --git a/fs_attachment/tests/__init__.py b/fs_attachment/tests/__init__.py
index 21e0af46e4..7f56d04124 100644
--- a/fs_attachment/tests/__init__.py
+++ b/fs_attachment/tests/__init__.py
@@ -1,2 +1,3 @@
from . import test_fs_attachment
+from . import test_fs_attachment_file_like_adapter
from . import test_fs_attachment_internal_url
diff --git a/fs_attachment/tests/common.py b/fs_attachment/tests/common.py
new file mode 100644
index 0000000000..8369fd9732
--- /dev/null
+++ b/fs_attachment/tests/common.py
@@ -0,0 +1,41 @@
+# Copyright 2023 ACSONE SA/NV (http://acsone.eu).
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
+import os
+import shutil
+import tempfile
+
+from odoo.tests.common import TransactionCase
+
+
+class TestFSAttachmentCommon(TransactionCase):
+ @classmethod
+ def setUpClass(cls):
+ super().setUpClass()
+ cls.env = cls.env(context=dict(cls.env.context, tracking_disable=True))
+ temp_dir = tempfile.mkdtemp()
+ cls.temp_backend = cls.env["fs.storage"].create(
+ {
+ "name": "Temp FS Storage",
+ "protocol": "file",
+ "code": "tmp_dir",
+ "directory_path": temp_dir,
+ }
+ )
+ cls.temp_dir = temp_dir
+ cls.gc_file_model = cls.env["fs.file.gc"]
+ cls.ir_attachment_model = cls.env["ir.attachment"]
+
+ @cls.addClassCleanup
+ def cleanup_tempdir():
+ shutil.rmtree(temp_dir)
+
+ def tearDown(self) -> None:
+ super().tearDown()
+ # empty the temp dir
+ for f in os.listdir(self.temp_dir):
+ os.remove(os.path.join(self.temp_dir, f))
+
+
+class MyException(Exception):
+ """Exception to be raised into tests ensure that we trap only this
+ exception and not other exceptions raised by the test"""
diff --git a/fs_attachment/tests/test_fs_attachment.py b/fs_attachment/tests/test_fs_attachment.py
index f2963782ce..db98d604c3 100644
--- a/fs_attachment/tests/test_fs_attachment.py
+++ b/fs_attachment/tests/test_fs_attachment.py
@@ -1,42 +1,14 @@
# Copyright 2023 ACSONE SA/NV (http://acsone.eu).
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
import os
-import shutil
-import tempfile
from unittest import mock
-from odoo.tests.common import TransactionCase
from odoo.tools import mute_logger
+from .common import MyException, TestFSAttachmentCommon
-class TestFSAttachment(TransactionCase):
- @classmethod
- def setUpClass(cls):
- super().setUpClass()
- cls.env = cls.env(context=dict(cls.env.context, tracking_disable=True))
- temp_dir = tempfile.mkdtemp()
- cls.temp_backend = cls.env["fs.storage"].create(
- {
- "name": "Temp FS Storage",
- "protocol": "file",
- "code": "tmp_dir",
- "directory_path": temp_dir,
- }
- )
- cls.temp_dir = temp_dir
- cls.gc_file_model = cls.env["fs.file.gc"]
- cls.ir_attachment_model = cls.env["ir.attachment"]
-
- @cls.addClassCleanup
- def cleanup_tempdir():
- shutil.rmtree(temp_dir)
-
- def tearDown(self) -> None:
- super().tearDown()
- # empty the temp dir
- for f in os.listdir(self.temp_dir):
- os.remove(os.path.join(self.temp_dir, f))
+class TestFSAttachment(TestFSAttachmentCommon):
def test_create_attachment_explicit_location(self):
content = b"This is a test attachment"
attachment = (
@@ -47,7 +19,6 @@ def test_create_attachment_explicit_location(self):
)
.create({"name": "test.txt", "raw": content})
)
- self.env.flush_all()
self.assertEqual(os.listdir(self.temp_dir), [f"test-{attachment.id}-0.txt"])
self.assertEqual(attachment.raw, content)
self.assertFalse(attachment.db_datas)
@@ -57,10 +28,6 @@ def test_create_attachment_explicit_location(self):
with attachment.open("wb") as f:
f.write(b"new")
- # refresh is required while we don't use a file-like object proxy
- # that detect the modification of the content and invalidate the
- # record's cache
- attachment.invalidate_recordset()
self.assertEqual(attachment.raw, b"new")
def test_open_attachment_in_db(self):
@@ -74,8 +41,9 @@ def test_open_attachment_in_db(self):
self.assertEqual(attachment.mimetype, "text/plain")
with attachment.open("rb") as f:
self.assertEqual(f.read(), content)
- with self.assertRaisesRegex(SystemError, "Write mode is not supported"):
- attachment.open("wb")
+ with attachment.open("wb") as f:
+ f.write(b"new")
+ self.assertEqual(attachment.raw, b"new")
def test_attachment_open_in_filestore(self):
self.env["ir.config_parameter"].sudo().set_param(
@@ -92,10 +60,6 @@ def test_attachment_open_in_filestore(self):
self.assertEqual(f.read(), content)
with attachment.open("wb") as f:
f.write(b"new")
- # refresh is required while we don't use a file-like object proxy
- # that detect the modification of the content and invalidate the
- # record's cache
- attachment.invalidate_recordset()
self.assertEqual(attachment.raw, b"new")
def test_default_attachment_store_in_fs(self):
@@ -188,8 +152,6 @@ def test_fs_update_transactionnal(self):
raise MyException("dummy exception")
except MyException:
...
- attachment.invalidate_recordset()
- self.env.flush_all()
self.assertEqual(attachment.store_fname, f"tmp_dir://{initial_filename}")
self.assertEqual(attachment.fs_filename, initial_filename)
self.assertEqual(attachment.raw, content)
@@ -382,8 +344,3 @@ def test_storage_use_filename_obfuscation(self):
self.assertEqual(attachment.checksum, attachment.store_fname.split("/")[-1])
self.assertEqual(attachment.checksum, attachment.fs_url.split("/")[-1])
self.assertEqual(attachment.mimetype, "text/plain")
-
-
-class MyException(Exception):
- """Exception to be raised into tests ensure that we trap only this
- exception and not other exceptions raised by the test"""
diff --git a/fs_attachment/tests/test_fs_attachment_file_like_adapter.py b/fs_attachment/tests/test_fs_attachment_file_like_adapter.py
new file mode 100644
index 0000000000..44ee875df4
--- /dev/null
+++ b/fs_attachment/tests/test_fs_attachment_file_like_adapter.py
@@ -0,0 +1,150 @@
+# Copyright 2023 ACSONE SA/NV (http://acsone.eu).
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
+
+from ..models.ir_attachment import AttachmentFileLikeAdapter
+from .common import MyException, TestFSAttachmentCommon
+
+
+class TestFSAttachmentFileLikeAdapterMixin:
+ @classmethod
+ def _create_attachment(cls):
+ raise NotImplementedError
+
+ @classmethod
+ def prepareClass(cls):
+ cls.initial_content = b"This is a test attachment"
+ cls.new_content = b"This is a new test attachment"
+
+ def prepare(self):
+ self.attachment = self._create_attachment()
+
+ def open(self, attachment=None, mode="rb", new_version=False, **kwargs):
+ return AttachmentFileLikeAdapter(
+ attachment or self.attachment,
+ mode=mode,
+ new_version=new_version,
+ **kwargs,
+ )
+
+ def test_read(self):
+ with self.open(model="rf") as f:
+ self.assertEqual(f.read(), self.initial_content)
+
+ def test_write(self):
+ with self.open(mode="wb") as f:
+ f.write(self.new_content)
+ self.assertEqual(self.new_content, self.attachment.raw)
+
+ def test_write_append(self):
+ self.assertEqual(self.initial_content, self.attachment.raw)
+ with self.open(mode="ab") as f:
+ f.write(self.new_content)
+ self.assertEqual(self.initial_content + self.new_content, self.attachment.raw)
+
+ def test_write_new_version(self):
+ initial_fname = self.attachment.store_fname
+ with self.open(mode="wb", new_version=True) as f:
+ f.write(self.new_content)
+ self.assertEqual(self.new_content, self.attachment.raw)
+ if initial_fname:
+ self.assertNotEqual(self.attachment.store_fname, initial_fname)
+
+ def test_write_append_new_version(self):
+ initial_fname = self.attachment.store_fname
+ with self.open(mode="ab", new_version=True) as f:
+ f.write(self.new_content)
+ self.assertEqual(self.initial_content + self.new_content, self.attachment.raw)
+ if initial_fname:
+ self.assertNotEqual(self.attachment.store_fname, initial_fname)
+
+ def test_write_transactional_new_version_only(self):
+ try:
+ initial_fname = self.attachment.store_fname
+ with self.env.cr.savepoint():
+ with self.open(mode="wb", new_version=True) as f:
+ f.write(self.new_content)
+ self.assertEqual(self.new_content, self.attachment.raw)
+ if initial_fname:
+ self.assertNotEqual(self.attachment.store_fname, initial_fname)
+ raise MyException("Test")
+ except MyException:
+ ...
+
+ self.assertEqual(self.initial_content, self.attachment.raw)
+ if initial_fname:
+ self.assertEqual(self.attachment.store_fname, initial_fname)
+
+
+class TestAttachmentInFileSystemFileLikeAdapter(
+ TestFSAttachmentCommon, TestFSAttachmentFileLikeAdapterMixin
+):
+ @classmethod
+ def setUpClass(cls):
+ super().setUpClass()
+ cls.prepareClass()
+
+ def setUp(self):
+ super().setUp()
+ self.prepare()
+
+ @classmethod
+ def _create_attachment(cls):
+ return (
+ cls.env["ir.attachment"]
+ .with_context(
+ storage_location=cls.temp_backend.code,
+ storage_file_path="test.txt",
+ )
+ .create({"name": "test.txt", "raw": cls.initial_content})
+ )
+
+
+class TestAttachmentInDBFileLikeAdapter(
+ TestFSAttachmentCommon, TestFSAttachmentFileLikeAdapterMixin
+):
+ @classmethod
+ def setUpClass(cls):
+ super().setUpClass()
+ cls.prepareClass()
+
+ def setUp(self):
+ super().setUp()
+ self.env["ir.config_parameter"].sudo().set_param("ir_attachment.location", "db")
+ self.prepare()
+
+ def tearDown(self) -> None:
+ self.attachment.unlink()
+ super().tearDown()
+
+ @classmethod
+ def _create_attachment(cls):
+ return cls.env["ir.attachment"].create(
+ {"name": "test.txt", "raw": cls.initial_content}
+ )
+
+
+class TestAttachmentInFileFileLikeAdapter(
+ TestFSAttachmentCommon, TestFSAttachmentFileLikeAdapterMixin
+):
+ @classmethod
+ def setUpClass(cls):
+ super().setUpClass()
+ cls.prepareClass()
+
+ def setUp(self):
+ super().setUp()
+ self.env["ir.config_parameter"].sudo().set_param(
+ "ir_attachment.location", "file"
+ )
+ self.prepare()
+
+ def tearDown(self) -> None:
+ self.attachment.unlink()
+ self.attachment._gc_file_store_unsafe()
+ super().tearDown()
+
+ @classmethod
+ def _create_attachment(cls):
+ return cls.env["ir.attachment"].create(
+ {"name": "test.txt", "raw": cls.initial_content}
+ )
From f3923a0add9e9646708caf27b2e0b7fa38bea2f0 Mon Sep 17 00:00:00 2001
From: "Laurent Mignon (ACSONE)"
Date: Mon, 10 Jul 2023 09:30:07 +0200
Subject: [PATCH 44/47] [IMP] fs_attachment: Speedup install
Avoid recomputing the new columns when the module is installed on an existing database
---
fs_attachment/__init__.py | 1 +
fs_attachment/__manifest__.py | 1 +
fs_attachment/hooks.py | 33 +++++++++++++++++++++++++++++++++
3 files changed, 35 insertions(+)
create mode 100644 fs_attachment/hooks.py
diff --git a/fs_attachment/__init__.py b/fs_attachment/__init__.py
index 0650744f6b..6d58305f5d 100644
--- a/fs_attachment/__init__.py
+++ b/fs_attachment/__init__.py
@@ -1 +1,2 @@
from . import models
+from .hooks import pre_init_hook
diff --git a/fs_attachment/__manifest__.py b/fs_attachment/__manifest__.py
index 62affae50e..3a4deeacb9 100644
--- a/fs_attachment/__manifest__.py
+++ b/fs_attachment/__manifest__.py
@@ -20,4 +20,5 @@
"installable": True,
"auto_install": False,
"maintainers": ["lmignon"],
+ "pre_init_hook": "pre_init_hook",
}
diff --git a/fs_attachment/hooks.py b/fs_attachment/hooks.py
new file mode 100644
index 0000000000..bbb464389a
--- /dev/null
+++ b/fs_attachment/hooks.py
@@ -0,0 +1,33 @@
+# Copyright 2023 ACSONE SA/NV
+# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
+import logging
+
+_logger = logging.getLogger(__name__)
+
+
+def pre_init_hook(cr):
+ """Pre init hook."""
+ # add columns for computed fields to avoid useless computation by the ORM
+ # when installing the module
+ _logger.info("Add columns for computed fields on ir_attachment")
+ cr.execute(
+ """
+ ALTER TABLE ir_attachment
+ ADD COLUMN fs_storage_id INTEGER;
+ ALTER TABLE ir_attachment
+ ADD FOREIGN KEY (fs_storage_id) REFERENCES fs_storage(id);
+ """
+ )
+ cr.execute(
+ """
+ ALTER TABLE ir_attachment
+ ADD COLUMN fs_url VARCHAR;
+ """
+ )
+ cr.execute(
+ """
+ ALTER TABLE ir_attachment
+ ADD COLUMN fs_storage_code VARCHAR;
+ """
+ )
+ _logger.info("Columns added on ir_attachment")
From 732a3a4ad9662b18873fe957cc21ae31070a8e83 Mon Sep 17 00:00:00 2001
From: "Laurent Mignon (ACSONE)"
Date: Mon, 10 Jul 2023 14:30:10 +0200
Subject: [PATCH 45/47] [IMP] fs_attachment: Server Environment support
Allows providing configuration parameters through server environment files.
---
fs_attachment/README.rst | 27 ++++++++++
fs_attachment/models/fs_file_gc.py | 4 +-
fs_attachment/models/fs_storage.py | 49 ++++++++++++------
fs_attachment/readme/USAGE.rst | 27 ++++++++++
fs_attachment/static/description/index.html | 50 ++++++++++++++-----
fs_attachment/tests/common.py | 12 +++++
.../tests/test_fs_attachment_internal_url.py | 13 +++++
7 files changed, 153 insertions(+), 29 deletions(-)
diff --git a/fs_attachment/README.rst b/fs_attachment/README.rst
index 3a27b3e165..921f0389f4 100644
--- a/fs_attachment/README.rst
+++ b/fs_attachment/README.rst
@@ -200,6 +200,33 @@ from URLs.
SEO. This option is disabled by default.
+Server Environment
+~~~~~~~~~~~~~~~~~~
+
+When you configure a storage through a server environment file, you can
+provide values for the following keys:
+
+* ``optimizes_directory_path``
+* ``autovacuum_gc``
+* ``base_url``
+* ``is_directory_path_in_url``
+* ``use_x_sendfile_to_serve_internal_url``
+* ``use_as_default_for_attachments``
+* ``force_db_for_default_attachment_rules``
+* ``use_filename_obfuscation``
+
+For example, the configuration of a storage with code `fsprod` used to store
+the attachments by default could be:
+
+.. code-block:: ini
+
+ [fs_storage.fsprod]
+ protocol=s3
+ options={"endpoint_url": "https://my_s3_server/", "key": "KEY", "secret": "SECRET"}
+ directory_path=my_bucket
+ use_as_default_for_attachments=True
+ use_filename_obfuscation=True
+
Advanced usage: Using attachment as a file
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/fs_attachment/models/fs_file_gc.py b/fs_attachment/models/fs_file_gc.py
index 72d5ec3f3c..a01690910d 100644
--- a/fs_attachment/models/fs_file_gc.py
+++ b/fs_attachment/models/fs_file_gc.py
@@ -123,8 +123,10 @@ def _gc_files(self) -> None:
def _gc_files_unsafe(self) -> None:
# get the list of fs.storage codes that must be autovacuumed
codes = (
- self.env["fs.storage"].search([("autovacuum_gc", "=", True)]).mapped("code")
+ self.env["fs.storage"].search([]).filtered("autovacuum_gc").mapped("code")
)
+ if not codes:
+ return
# we process by batch of storage codes.
self._cr.execute(
"""
diff --git a/fs_attachment/models/fs_storage.py b/fs_attachment/models/fs_storage.py
index b7951cb107..a0ea146e27 100644
--- a/fs_attachment/models/fs_storage.py
+++ b/fs_attachment/models/fs_storage.py
@@ -77,13 +77,32 @@ class FsStorage(models.Model):
"the filename is important for SEO.",
)
- _sql_constraints = [
- (
- "use_as_default_for_attachments_unique",
- "unique(use_as_default_for_attachments)",
- "Only one storage can be used as default for attachments",
+ @api.constrains("use_as_default_for_attachments")
+ def _check_use_as_default_for_attachments(self):
+ # constraints are checked in Python since values can be provided by
+ # the server environment
+ defaults = self.search([]).filtered("use_as_default_for_attachments")
+ if len(defaults) > 1:
+ raise ValidationError(
+ _("Only one storage can be used as default for attachments")
+ )
+
+ @property
+ def _server_env_fields(self):
+ env_fields = super()._server_env_fields
+ env_fields.update(
+ {
+ "optimizes_directory_path": {},
+ "autovacuum_gc": {},
+ "base_url": {},
+ "is_directory_path_in_url": {},
+ "use_x_sendfile_to_serve_internal_url": {},
+ "use_as_default_for_attachments": {},
+ "force_db_for_default_attachment_rules": {},
+ "use_filename_obfuscation": {},
+ }
)
- ]
+ return env_fields
@property
def _default_force_db_for_default_attachment_rules(self) -> str:
@@ -140,9 +159,11 @@ def _check_force_db_for_default_attachment_rules(self):
@tools.ormcache()
def get_default_storage_code_for_attachments(self):
"""Return the code of the storage to use to store by default the attachments"""
- storage = self.search([("use_as_default_for_attachments", "=", True)], limit=1)
- if storage:
- return storage.code
+ storages = self.search([]).filtered_domain(
+ [("use_as_default_for_attachments", "=", True)]
+ )
+ if storages:
+ return storages[0].code
return None
@api.model
@@ -163,21 +184,17 @@ def get_force_db_for_default_attachment_rules(self, code):
@api.model
@tools.ormcache("code")
def _must_optimize_directory_path(self, code):
- return bool(
- self.search([("code", "=", code), ("optimizes_directory_path", "=", True)])
- )
+ return self.get_by_code(code).optimizes_directory_path
@api.model
@tools.ormcache("code")
def _must_autovacuum_gc(self, code):
- return bool(self.search([("code", "=", code), ("autovacuum_gc", "=", True)]))
+ return self.get_by_code(code).autovacuum_gc
@api.model
@tools.ormcache("code")
def _must_use_filename_obfuscation(self, code):
- return bool(
- self.search([("code", "=", code), ("use_filename_obfuscation", "=", True)])
- )
+ return self.get_by_code(code).use_filename_obfuscation
@api.depends("base_url", "is_directory_path_in_url")
def _compute_base_url_for_files(self):
diff --git a/fs_attachment/readme/USAGE.rst b/fs_attachment/readme/USAGE.rst
index 2bd95e3dc7..acbf3ced00 100644
--- a/fs_attachment/readme/USAGE.rst
+++ b/fs_attachment/readme/USAGE.rst
@@ -119,6 +119,33 @@ from URLs.
SEO. This option is disabled by default.
+Server Environment
+~~~~~~~~~~~~~~~~~~
+
+When you configure a storage through a server environment file, you can
+provide values for the following keys:
+
+* ``optimizes_directory_path``
+* ``autovacuum_gc``
+* ``base_url``
+* ``is_directory_path_in_url``
+* ``use_x_sendfile_to_serve_internal_url``
+* ``use_as_default_for_attachments``
+* ``force_db_for_default_attachment_rules``
+* ``use_filename_obfuscation``
+
+For example, the configuration of a storage with code `fsprod` used to store
+the attachments by default could be:
+
+.. code-block:: ini
+
+ [fs_storage.fsprod]
+ protocol=s3
+ options={"endpoint_url": "https://my_s3_server/", "key": "KEY", "secret": "SECRET"}
+ directory_path=my_bucket
+ use_as_default_for_attachments=True
+ use_filename_obfuscation=True
+
Advanced usage: Using attachment as a file
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/fs_attachment/static/description/index.html b/fs_attachment/static/description/index.html
index bf82a6988f..9bbfeb1df7 100644
--- a/fs_attachment/static/description/index.html
+++ b/fs_attachment/static/description/index.html
@@ -415,15 +415,16 @@ Base Attachment Object Store
+
+
+
When you configure a storage through a server environment file, you can
+provide values for the following keys:
+
+- optimizes_directory_path
+- autovacuum_gc
+- base_url
+- is_directory_path_in_url
+- use_x_sendfile_to_serve_internal_url
+- use_as_default_for_attachments
+- force_db_for_default_attachment_rules
+- use_filename_obfuscation
+
+
For example, the configuration of a storage with code fsprod used to store
+the attachments by default could be:
+
+[fs_storage.fsprod]
+protocol=s3
+options={"endpoint_url": "https://my_s3_server/", "key": "KEY", "secret": "SECRET"}
+directory_path=my_bucket
+use_as_default_for_attachments=True
+use_filename_obfuscation=True
+
+
-
+
The open method on the attachment can be used to open and manipulate the attachment
as a file object. The object returned by the call to the method implements
methods from io.IOBase. The method can be called like any other Python method.
@@ -618,16 +644,16 @@
Do not contact contributors directly about support or help with technical issues.
-
+
-
+
This module is maintained by the OCA.
OCA, or the Odoo Community Association, is a nonprofit organization whose
diff --git a/fs_attachment/tests/common.py b/fs_attachment/tests/common.py
index 8369fd9732..95ea76d006 100644
--- a/fs_attachment/tests/common.py
+++ b/fs_attachment/tests/common.py
@@ -29,6 +29,18 @@ def setUpClass(cls):
def cleanup_tempdir():
shutil.rmtree(temp_dir)
+ def setUp(self):
+ super().setUp()
+ # enforce temp_backend fields since it seems that they are reset on
+ # savepoint rollback when managed by server_environment -> to be investigated
+ self.temp_backend.write(
+ {
+ "protocol": "file",
+ "code": "tmp_dir",
+ "directory_path": self.temp_dir,
+ }
+ )
+
def tearDown(self) -> None:
super().tearDown()
# empty the temp dir
diff --git a/fs_attachment/tests/test_fs_attachment_internal_url.py b/fs_attachment/tests/test_fs_attachment_internal_url.py
index e805a87008..0dac94c72d 100644
--- a/fs_attachment/tests/test_fs_attachment_internal_url.py
+++ b/fs_attachment/tests/test_fs_attachment_internal_url.py
@@ -40,6 +40,19 @@ def setUpClass(cls):
def cleanup_tempdir():
shutil.rmtree(temp_dir)
+ def setUp(self):
+ super().setUp()
+ # enforce temp_backend fields since it seems that they are reset on
+ # savepoint rollback when managed by server_environment -> to be investigated
+ self.temp_backend.write(
+ {
+ "protocol": "file",
+ "code": "tmp_dir",
+ "directory_path": self.temp_dir,
+ "base_url": "http://my.public.files/",
+ }
+ )
+
@classmethod
def tearDownClass(cls):
super().tearDownClass()
From 49c826bc70afbed211a24c8d8e03353fdcd6a97a Mon Sep 17 00:00:00 2001
From: "Laurent Mignon (ACSONE)"
Date: Mon, 10 Jul 2023 14:36:30 +0200
Subject: [PATCH 46/47] [IMP] fs_attachment: Simplify code.
Remove the code that read files from the root filesystem while writing into the specialized filesystem. That code was meant to support staging environments by reusing the same filesystem storage with a different directory_path per environment. A simpler approach is to use a different filesystem storage per environment: if a production database is restored in a pre-production environment, you can declare a new filesystem storage with a different code to store new attachments by default, and configure the filesystem storage coming from production so that it can read the documents stored in it but not modify or delete them. This makes the implementation far simpler.
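
The setup described above can be sketched with server environment files. The example below is illustrative only: the storage codes `fsprod` and `fsstaging`, the bucket names and the credentials are hypothetical, and the read-only behaviour of the production storage is assumed to come from S3 credentials that only grant read access.

.. code-block:: ini

    # hypothetical staging server environment file

    # storage restored from production: the credentials used here are assumed
    # to only grant read access on the bucket
    [fs_storage.fsprod]
    protocol=s3
    options={"endpoint_url": "https://my_s3_server/", "key": "READONLY_KEY", "secret": "READONLY_SECRET"}
    directory_path=prod_bucket

    # new storage receiving the attachments created on staging
    [fs_storage.fsstaging]
    protocol=s3
    options={"endpoint_url": "https://my_s3_server/", "key": "KEY", "secret": "SECRET"}
    directory_path=staging_bucket
    use_as_default_for_attachments=True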
---
fs_attachment/README.rst | 16 ++----
fs_attachment/fs_stream.py | 3 +-
fs_attachment/models/fs_file_gc.py | 15 +----
fs_attachment/models/fs_storage.py | 6 +-
fs_attachment/models/ir_attachment.py | 64 ++++++++++++---------
fs_attachment/readme/USAGE.rst | 16 ++----
fs_attachment/static/description/index.html | 20 +++----
fs_attachment/tests/test_fs_attachment.py | 20 +++----
8 files changed, 72 insertions(+), 88 deletions(-)
diff --git a/fs_attachment/README.rst b/fs_attachment/README.rst
index 921f0389f4..28c2e4bfc6 100644
--- a/fs_attachment/README.rst
+++ b/fs_attachment/README.rst
@@ -282,16 +282,12 @@ Tips & Tricks
the attachments shared between the two instances BUT you don't want to have
one instance removing or modifying the attachments of the other instance.
- To do so, you can configure the same filesystem storage on both instances and
- use a different directory path. (For S3 storage, directory path is the bucket
- name). When a file is written in the filesystem storage, it's always written into
- the directory path configured on the storage and full path of the file is stored
- in the database. When reading a file, it's always read from the full path stored
- in the database. So if you have two instances using the same storage with different
- directory paths, files written in each instance will be stored in different
- directories but be accessible from the other instance. A check is also done when
- an attachment is removed to ensure that only files stored in the current directory
- path are removed.
+ To do so, you can add a new storage on your staging instances and declare it
+ as the default storage for attachments. This way, all new attachments will be
+ stored in this new storage, while the attachments created on the production
+ instance are still read from the production storage. Be careful to configure
+ the storage that points to the production data as read only on your staging
+ instances (server environment files are a good way to do so).
Bug Tracker
===========
diff --git a/fs_attachment/fs_stream.py b/fs_attachment/fs_stream.py
index 018068587a..fafc233892 100644
--- a/fs_attachment/fs_stream.py
+++ b/fs_attachment/fs_stream.py
@@ -23,7 +23,8 @@ def from_fs_attachment(cls, attachment: IrAttachment) -> FsStream:
raise ValueError("Attachment is not stored into a filesystem storage")
size = 0
if cls._check_use_x_sendfile(attachment):
- fs_info = attachment.fs_storage_id.root_fs.info(attachment.fs_filename)
+ fs, _storage, fname = attachment._get_fs_parts()
+ fs_info = fs.info(fname)
size = fs_info["size"]
return cls(
mimetype=attachment.mimetype,
diff --git a/fs_attachment/models/fs_file_gc.py b/fs_attachment/models/fs_file_gc.py
index a01690910d..ad57303ead 100644
--- a/fs_attachment/models/fs_file_gc.py
+++ b/fs_attachment/models/fs_file_gc.py
@@ -39,7 +39,7 @@ def _is_test_mode(self) -> bool:
@contextmanager
def _in_new_cursor(self) -> Cursor:
"""Context manager to execute code in a new cursor"""
- if self._is_test_mode():
+ if self._is_test_mode() or not self.env.registry.ready:
yield self.env.cr
return
@@ -149,20 +149,11 @@ def _gc_files_unsafe(self) -> None:
(tuple(codes),),
)
for code, store_fnames in self._cr.fetchall():
- storage = self.env["fs.storage"].get_by_code(code)
- fs = self.env["fs.storage"].get_fs_by_code(code, root=True)
+ self.env["fs.storage"].get_by_code(code)
+ fs = self.env["fs.storage"].get_fs_by_code(code)
for store_fname in store_fnames:
try:
file_path = store_fname.partition("://")[2]
- if storage.directory_path and not file_path.startswith(
- storage.directory_path
- ):
- _logger.debug(
- "File %s is not in the storage directory %s",
- store_fname,
- storage.directory_path,
- )
- continue
fs.rm(file_path)
except Exception:
_logger.debug("Failed to remove file %s", store_fname)
diff --git a/fs_attachment/models/fs_storage.py b/fs_attachment/models/fs_storage.py
index a0ea146e27..60203e865a 100644
--- a/fs_attachment/models/fs_storage.py
+++ b/fs_attachment/models/fs_storage.py
@@ -176,7 +176,7 @@ def get_force_db_for_default_attachment_rules(self, code):
and the value is the limit in size below which attachments are kept in DB.
0 means no limit.
"""
- storage = self.search([("code", "=", code)], limit=1)
+ storage = self.get_by_code(code)
if storage and storage.force_db_for_default_attachment_rules:
return const_eval(storage.force_db_for_default_attachment_rules)
return {}
@@ -224,8 +224,8 @@ def _get_url_for_attachment(
return None
if exclude_base_url:
base_url = base_url.replace(fs_storage.base_url.rstrip("/"), "") or "/"
- # always remove the directory_path from the fs_file_name
- # ony if it's at the start of the filename
+ # always remove the directory_path from the fs_filename
+ # only if it's at the start of the filename
fs_filename = attachment.fs_filename
if fs_filename.startswith(fs_storage.directory_path):
fs_filename = fs_filename.replace(fs_storage.directory_path, "")
diff --git a/fs_attachment/models/ir_attachment.py b/fs_attachment/models/ir_attachment.py
index 4d7d67a246..20d3aa46aa 100644
--- a/fs_attachment/models/ir_attachment.py
+++ b/fs_attachment/models/ir_attachment.py
@@ -318,7 +318,7 @@ def _set_attachment_data(self, asbytes) -> None: # pylint: disable=missing-retu
@api.model
def _storage_file_read(self, fname: str) -> bytes | None:
"""Read the file from the filesystem storage"""
- fs, _storage, fname = self._fs_parse_store_fname(fname, root=True)
+ fs, _storage, fname = self._fs_parse_store_fname(fname)
with fs.open(fname, "rb") as fs:
return fs.read()
@@ -377,7 +377,9 @@ def _build_fs_filename(self):
extension = mimetypes.guess_extension(self.mimetype)
version = 0
if self.fs_filename:
- version = self._parse_fs_filename(self.fs_filename)[2] + 1
+ parsed = self._parse_fs_filename(self.fs_filename)
+ if parsed:
+ version = parsed[2] + 1
return "{}{}".format(
slugify(
"{}-{}-{}".format(filename, self.id, version),
@@ -400,17 +402,19 @@ def _enforce_meaningful_storage_filename(self) -> None:
for attachment in self:
if not self._is_file_from_a_storage(attachment.store_fname):
continue
- fs, storage, filename = self._fs_parse_store_fname(attachment.store_fname)
+ fs, storage, filename = self._get_fs_parts()
+
if self.env["fs.storage"]._must_use_filename_obfuscation(storage):
- attachment.fs_filename = fs.info(filename)["name"]
+ attachment.fs_filename = filename
continue
if self._is_fs_filename_meaningful(filename):
continue
new_filename = attachment._build_fs_filename()
# we must keep the same full path as the original filename
- new_filename = os.path.join(os.path.dirname(filename), new_filename)
- fs.rename(filename, new_filename)
- new_filename = fs.info(new_filename)["name"]
+ new_filename_with_path = os.path.join(
+ os.path.dirname(filename), new_filename
+ )
+ fs.rename(filename, new_filename_with_path)
attachment.fs_filename = new_filename
# we need to update the store_fname with the new filename by
# calling the write method of the field since the write method
@@ -429,17 +433,18 @@ def _force_write_store_fname(self, store_fname):
@api.model
def _get_fs_storage_for_code(
- self, code: str, root: bool = False
+ self,
+ code: str,
) -> fsspec.AbstractFileSystem | None:
"""Return the filesystem for the given storage code"""
- fs = self.env["fs.storage"].get_fs_by_code(code, root=root)
+ fs = self.env["fs.storage"].get_fs_by_code(code)
if not fs:
raise SystemError(f"No Filesystem storage for code {code}")
return fs
@api.model
def _fs_parse_store_fname(
- self, fname: str, root: bool = False
+ self, fname: str
) -> tuple[fsspec.AbstractFileSystem, str, str]:
"""Return the filesystem, the storage code and the path for the given fname
@@ -448,7 +453,7 @@ def _fs_parse_store_fname(
"""
partition = fname.partition("://")
storage_code = partition[0]
- fs = self._get_fs_storage_for_code(storage_code, root=root)
+ fs = self._get_fs_storage_for_code(storage_code)
fname = partition[2]
return fs, storage_code, fname
@@ -520,6 +525,14 @@ def _fs_mark_for_gc(self, fname):
"""
self.env["fs.file.gc"]._mark_for_gc(fname)
+ def _get_fs_parts(
+ self,
+ ) -> tuple[fsspec.AbstractFileSystem, str, str] | tuple[None, None, None]:
+ """Return the filesystem, the storage code and the path for the current attachment"""
+ if not self.store_fname:
+ return None, None, None
+ return self._fs_parse_store_fname(self.store_fname)
+
def open(
self,
mode="rb",
@@ -933,9 +946,7 @@ def _file_open(self) -> io.IOBase:
or self._is_stored_in_db
):
if self.attachment._is_file_from_a_storage(self.attachment.store_fname):
- fs, _storage, fname = self.attachment._fs_parse_store_fname(
- self.attachment.store_fname, root=True
- )
+ fs, _storage, fname = self.attachment._get_fs_parts()
filepath = fname
filesystem = fs
elif self.attachment.store_fname:
@@ -966,18 +977,12 @@ def _file_open(self) -> io.IOBase:
checksum = self.attachment._compute_checksum(content)
new_store_fname = self.attachment._file_write(content, checksum)
if self.attachment._is_file_from_a_storage(new_store_fname):
- # the new store_fname is a path from the specified storage
- # the store_fname into the attachement is expressed from the
- # root filesystem. This is done on purpose to always read
- # from the root filesystem and write to the specialized one
- fs, _storage, fname = self.attachment._fs_parse_store_fname(
- new_store_fname, root=False
- )
- new_filepath = fs.info(fname)["name"]
- root_fs, _storage, old_filepath = self.attachment._fs_parse_store_fname(
- self.attachment.store_fname, root=True
- )
- filesystem = root_fs
+ (
+ filesystem,
+ _storage,
+ new_filepath,
+ ) = self.attachment._fs_parse_store_fname(new_store_fname)
+ _fs, _storage, old_filepath = self.attachment._get_fs_parts()
else:
new_filepath = self.attachment._full_path(new_store_fname)
old_filepath = self.attachment._full_path(self.attachment.store_fname)
@@ -1023,8 +1028,11 @@ def _file_close(self):
def _get_attachment_data(self) -> dict:
ret = {}
if self._file:
- ret["checksum"] = self._filesystem.checksum(self._file.path)
- ret["file_size"] = self._filesystem.size(self._file.path)
+ file_path = self._file.path
+ if hasattr(self._filesystem, "path"):
+ file_path = file_path.replace(self._filesystem.path, "")
+ ret["checksum"] = self._filesystem.checksum(file_path)
+ ret["file_size"] = self._filesystem.size(file_path)
# TODO index_content is too expensive to compute here or should be configurable
# data = self._file.read()
# ret["index_content"] = self.attachment._index_content(data,
diff --git a/fs_attachment/readme/USAGE.rst b/fs_attachment/readme/USAGE.rst
index acbf3ced00..9cd3b1b002 100644
--- a/fs_attachment/readme/USAGE.rst
+++ b/fs_attachment/readme/USAGE.rst
@@ -201,13 +201,9 @@ Tips & Tricks
the attachments shared between the two instances BUT you don't want to have
one instance removing or modifying the attachments of the other instance.
- To do so, you can configure the same filesystem storage on both instances and
- use a different directory path. (For S3 storage, directory path is the bucket
- name). When a file is written in the filesystem storage, it's always written into
- the directory path configured on the storage and full path of the file is stored
- in the database. When reading a file, it's always read from the full path stored
- in the database. So if you have two instances using the same storage with different
- directory paths, files written in each instance will be stored in different
- directories but be accessible from the other instance. A check is also done when
- an attachment is removed to ensure that only files stored in the current directory
- path are removed.
+ To do so, you can add a new storage on your staging instances and declare it
+ as the default storage for attachments. This way, all new attachments will be
+ stored in this new storage, while the attachments created on the production
+ instance are still read from the production storage. Be careful to configure
+ the storage that points to the production data as read only on your staging
+ instances (server environment files are a good way to do so).
diff --git a/fs_attachment/static/description/index.html b/fs_attachment/static/description/index.html
index 9bbfeb1df7..1d4569bb60 100644
--- a/fs_attachment/static/description/index.html
+++ b/fs_attachment/static/description/index.html
@@ -614,29 +614,25 @@
-
+
When working in multi staging environments, the management of the attachments
can be tricky. For example, if you have a production instance and a staging
instance based on a backup of the production environment, you may want to have
the attachments shared between the two instances BUT you don’t want to have
one instance removing or modifying the attachments of the other instance.
-To do so, you can configure the same filesystem storage on both instances and
-use a different directory path. (For S3 storage, directory path is the bucket
-name). When a file is written in the filesystem storage, it’s always written into
-the directory path configured on the storage and full path of the file is stored
-in the database. When reading a file, it’s always read from the full path stored
-in the database. So if you have two instances using the same storage with different
-directory paths, files written in each instance will be stored in different
-directories but be accessible from the other instance. A check is also done when
-an attachment is removed to ensure that only files stored in the current directory
-path are removed.
+To do so, you can add a new storage on your staging instances and declare it
+as the default storage for attachments. This way, all new attachments will be
+stored in this new storage, while the attachments created on the production
+instance are still read from the production storage. Be careful to configure
+the storage that points to the production data as read only on your staging
+instances (server environment files are a good way to do so).
-
+
Bugs are tracked on GitHub Issues.
In case of trouble, please check there if your issue has already been reported.
If you spotted it first, help us smashing it by providing a detailed and welcomed
diff --git a/fs_attachment/tests/test_fs_attachment.py b/fs_attachment/tests/test_fs_attachment.py
index db98d604c3..ce304c3d8f 100644
--- a/fs_attachment/tests/test_fs_attachment.py
+++ b/fs_attachment/tests/test_fs_attachment.py
@@ -15,7 +15,7 @@ def test_create_attachment_explicit_location(self):
self.env["ir.attachment"]
.with_context(
storage_location=self.temp_backend.code,
- storage_file_path="test.txt",
+ force_storage_key="test.txt",
)
.create({"name": "test.txt", "raw": content})
)
@@ -93,9 +93,7 @@ def test_default_attachment_store_in_fs(self):
with open(os.path.join(self.temp_dir, new_filename), "rb") as f:
self.assertEqual(f.read(), b"new")
self.assertEqual(attachment.raw, b"new")
- self.assertEqual(
- attachment.store_fname, f"tmp_dir://{self.temp_dir}/{new_filename}"
- )
+ self.assertEqual(attachment.store_fname, f"tmp_dir://{new_filename}")
self.assertEqual(attachment.mimetype, "text/plain")
# the original file is to be deleted by the GC
@@ -128,7 +126,7 @@ def test_fs_update_transactionnal(self):
self.env.flush_all()
self.assertEqual(attachment.raw, content)
- initial_filename = f"{self.temp_dir}/test-{attachment.id}-0.txt"
+ initial_filename = f"test-{attachment.id}-0.txt"
self.assertEqual(attachment.store_fname, f"tmp_dir://{initial_filename}")
self.assertEqual(attachment.fs_filename, initial_filename)
@@ -140,7 +138,7 @@ def test_fs_update_transactionnal(self):
try:
with self.env.cr.savepoint():
attachment.raw = b"updated"
- new_filename = f"{self.temp_dir}/test-{attachment.id}-1.txt"
+ new_filename = f"test-{attachment.id}-1.txt"
new_store_fname = f"tmp_dir://{new_filename}"
self.assertEqual(attachment.store_fname, new_store_fname)
self.assertEqual(attachment.fs_filename, new_filename)
@@ -185,7 +183,7 @@ def test_fs_create_transactional(self):
)
self.env.flush_all()
self.assertEqual(attachment.raw, content)
- initial_filename = f"{self.temp_dir}/test-{attachment.id}-0.txt"
+ initial_filename = f"test-{attachment.id}-0.txt"
self.assertEqual(
attachment.store_fname, f"tmp_dir://{initial_filename}"
)
@@ -221,7 +219,7 @@ def test_fs_no_delete_if_not_in_current_directory_path(self):
{"name": "test.txt", "raw": content}
)
self.env.flush_all()
- initial_filename = f"{self.temp_dir}/test-{attachment.id}-0.txt"
+ initial_filename = f"test-{attachment.id}-0.txt"
self.assertEqual(
os.listdir(self.temp_dir), [os.path.basename(initial_filename)]
)
@@ -233,7 +231,7 @@ def test_fs_no_delete_if_not_in_current_directory_path(self):
{"name": "test.txt", "raw": content}
)
self.env.flush_all()
- initial_filename = f"{self.temp_dir}/test-{attachment.id}-0.txt"
+ initial_filename = f"test-{attachment.id}-0.txt"
self.assertEqual(
os.listdir(self.temp_dir), [os.path.basename(initial_filename)]
)
@@ -326,9 +324,7 @@ def test_force_storage_to_fs(self):
clean_fs.assert_called_once()
# files into the filestore must be moved to our filesystem storage
filename = f"test-{attachment.id}-0.txt"
- self.assertEqual(
- attachment.store_fname, f"tmp_dir://{self.temp_dir}/{filename}"
- )
+ self.assertEqual(attachment.store_fname, f"tmp_dir://{filename}")
self.assertIn(filename, os.listdir(self.temp_dir))
def test_storage_use_filename_obfuscation(self):
From bb18b7f591e3f3c2ae34efb239d23c31bfdf9132 Mon Sep 17 00:00:00 2001
From: "Laurent Mignon (ACSONE)"
Date: Mon, 10 Jul 2023 18:13:27 +0200
Subject: [PATCH 47/47] [FIX] fs_attachment: No new registry creation
To create a new cursor, just ask the current registry. Loading a new registry is very time consuming and could lead to deadlocks.
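
A minimal sketch of the pattern this fix applies, assuming a regular Odoo environment: the new cursor is taken from the registry already attached to the environment instead of loading a new Registry for the same database (the helper name below is illustrative, not part of the module).

.. code-block:: python

    from contextlib import closing

    def run_in_new_cursor(env, func):
        # Reuse the registry already loaded for this database; building a new
        # Registry would reload all models and is far more expensive.
        with closing(env.registry.cursor()) as cr:
            try:
                func(cr)
            except Exception:
                cr.rollback()
                raise
            else:
                cr.commit()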
---
fs_attachment/models/fs_file_gc.py | 4 +---
1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/fs_attachment/models/fs_file_gc.py b/fs_attachment/models/fs_file_gc.py
index ad57303ead..6ab70ec38e 100644
--- a/fs_attachment/models/fs_file_gc.py
+++ b/fs_attachment/models/fs_file_gc.py
@@ -4,7 +4,6 @@
import threading
from contextlib import closing, contextmanager
-import odoo
from odoo import api, fields, models
from odoo.sql_db import Cursor
@@ -43,8 +42,7 @@ def _in_new_cursor(self) -> Cursor:
yield self.env.cr
return
- registry = odoo.modules.registry.Registry.new(self.env.cr.dbname)
- with closing(registry.cursor()) as cr:
+ with closing(self.env.registry.cursor()) as cr:
try:
yield cr
except Exception: