From bfad465450e9370623103c6445561959adcc544a Mon Sep 17 00:00:00 2001 From: Ivan Daschinsky Date: Wed, 21 Apr 2021 19:18:23 +0300 Subject: [PATCH 1/6] IGNITE-14595 Implement ExpiryPolicy support. --- pyignite/aio_cache.py | 57 +-- pyignite/aio_client.py | 13 +- pyignite/aio_cluster.py | 5 +- pyignite/api/cache_config.py | 38 +- pyignite/api/key_value.py | 494 ++++++++++-------------- pyignite/api/sql.py | 78 ++-- pyignite/cache.py | 92 +++-- pyignite/client.py | 17 +- pyignite/cluster.py | 5 +- pyignite/connection/protocol_context.py | 3 + pyignite/cursors.py | 58 +-- pyignite/datatypes/__init__.py | 2 + pyignite/datatypes/cache_config.py | 74 ++-- pyignite/datatypes/cache_properties.py | 9 +- pyignite/datatypes/expiry_policy.py | 87 +++++ pyignite/datatypes/prop_codes.py | 1 + pyignite/queries/query.py | 33 +- tests/affinity/test_affinity.py | 4 +- tests/common/test_expiry_policy.py | 151 ++++++++ tests/custom/test_cluster.py | 2 +- 20 files changed, 717 insertions(+), 506 deletions(-) create mode 100644 pyignite/datatypes/expiry_policy.py create mode 100644 tests/common/test_expiry_policy.py diff --git a/pyignite/aio_cache.py b/pyignite/aio_cache.py index 32f2cb2..f088844 100644 --- a/pyignite/aio_cache.py +++ b/pyignite/aio_cache.py @@ -15,6 +15,7 @@ import asyncio from typing import Any, Iterable, Optional, Union +from .datatypes import ExpiryPolicy from .datatypes.internal import AnyDataObject from .exceptions import CacheCreationError, CacheError, ParameterError from .utils import status_to_exception @@ -80,17 +81,17 @@ class AioCache(BaseCache): :py:meth:`~pyignite.aio_client.AioClient.get_cache` methods instead. See :ref:`this example ` on how to do it. """ - def __init__(self, client: 'AioClient', name: str): + def __init__(self, client: 'AioClient', name: str, expiry_policy: ExpiryPolicy = None): """ Initialize async cache object. For internal use. :param client: Async Ignite client, :param name: Cache name. 
""" - super().__init__(client, name) + super().__init__(client, name, expiry_policy) async def _get_best_node(self, key=None, key_hint=None): - return await self.client.get_best_node(self._cache_id, key, key_hint) + return await self.client.get_best_node(self, key, key_hint) async def settings(self) -> Optional[dict]: """ @@ -103,7 +104,7 @@ async def settings(self) -> Optional[dict]: """ if self._settings is None: conn = await self._get_best_node() - config_result = await cache_get_configuration_async(conn, self._cache_id) + config_result = await cache_get_configuration_async(conn, self.cache_info) if config_result.status == 0: self._settings = config_result.value @@ -118,7 +119,7 @@ async def destroy(self): Destroys cache with a given name. """ conn = await self._get_best_node() - return await cache_destroy_async(conn, self._cache_id) + return await cache_destroy_async(conn, self.cache_id) @status_to_exception(CacheError) async def get(self, key, key_hint: object = None) -> Any: @@ -134,7 +135,7 @@ async def get(self, key, key_hint: object = None) -> Any: key_hint = AnyDataObject.map_python_type(key) conn = await self._get_best_node(key, key_hint) - result = await cache_get_async(conn, self._cache_id, key, key_hint=key_hint) + result = await cache_get_async(conn, self.cache_info, key, key_hint=key_hint) result.value = await self.client.unwrap_binary(result.value) return result @@ -155,7 +156,7 @@ async def put(self, key, value, key_hint: object = None, value_hint: object = No key_hint = AnyDataObject.map_python_type(key) conn = await self._get_best_node(key, key_hint) - return await cache_put_async(conn, self._cache_id, key, value, key_hint=key_hint, value_hint=value_hint) + return await cache_put_async(conn, self.cache_info, key, value, key_hint=key_hint, value_hint=value_hint) @status_to_exception(CacheError) async def get_all(self, keys: list) -> list: @@ -166,7 +167,7 @@ async def get_all(self, keys: list) -> list: :return: a dict of key-value pairs. 
""" conn = await self._get_best_node() - result = await cache_get_all_async(conn, self._cache_id, keys) + result = await cache_get_all_async(conn, self.cache_info, keys) if result.value: keys = list(result.value.keys()) values = await asyncio.gather(*[self.client.unwrap_binary(value) for value in result.value.values()]) @@ -186,7 +187,7 @@ async def put_all(self, pairs: dict): Python type or a tuple of (item, hint), """ conn = await self._get_best_node() - return await cache_put_all_async(conn, self._cache_id, pairs) + return await cache_put_all_async(conn, self.cache_info, pairs) @status_to_exception(CacheError) async def replace(self, key, value, key_hint: object = None, value_hint: object = None): @@ -204,7 +205,7 @@ async def replace(self, key, value, key_hint: object = None, value_hint: object key_hint = AnyDataObject.map_python_type(key) conn = await self._get_best_node(key, key_hint) - result = await cache_replace_async(conn, self._cache_id, key, value, key_hint=key_hint, value_hint=value_hint) + result = await cache_replace_async(conn, self.cache_info, key, value, key_hint=key_hint, value_hint=value_hint) result.value = await self.client.unwrap_binary(result.value) return result @@ -218,9 +219,9 @@ async def clear(self, keys: Optional[list] = None): """ conn = await self._get_best_node() if keys: - return await cache_clear_keys_async(conn, self._cache_id, keys) + return await cache_clear_keys_async(conn, self.cache_info, keys) else: - return await cache_clear_async(conn, self._cache_id) + return await cache_clear_async(conn, self.cache_info) @status_to_exception(CacheError) async def clear_key(self, key, key_hint: object = None): @@ -235,7 +236,7 @@ async def clear_key(self, key, key_hint: object = None): key_hint = AnyDataObject.map_python_type(key) conn = await self._get_best_node(key, key_hint) - return await cache_clear_key_async(conn, self._cache_id, key, key_hint=key_hint) + return await cache_clear_key_async(conn, self.cache_info, key, 
key_hint=key_hint) @status_to_exception(CacheError) async def clear_keys(self, keys: Iterable): @@ -245,7 +246,7 @@ async def clear_keys(self, keys: Iterable): :param keys: a list of keys or (key, type hint) tuples """ conn = await self._get_best_node() - return await cache_clear_keys_async(conn, self._cache_id, keys) + return await cache_clear_keys_async(conn, self.cache_info, keys) @status_to_exception(CacheError) async def contains_key(self, key, key_hint=None) -> bool: @@ -261,7 +262,7 @@ async def contains_key(self, key, key_hint=None) -> bool: key_hint = AnyDataObject.map_python_type(key) conn = await self._get_best_node(key, key_hint) - return await cache_contains_key_async(conn, self._cache_id, key, key_hint=key_hint) + return await cache_contains_key_async(conn, self.cache_info, key, key_hint=key_hint) @status_to_exception(CacheError) async def contains_keys(self, keys: Iterable) -> bool: @@ -272,7 +273,7 @@ async def contains_keys(self, keys: Iterable) -> bool: :return: boolean `True` when all keys are present, `False` otherwise. 
""" conn = await self._get_best_node() - return await cache_contains_keys_async(conn, self._cache_id, keys) + return await cache_contains_keys_async(conn, self.cache_info, keys) @status_to_exception(CacheError) async def get_and_put(self, key, value, key_hint=None, value_hint=None) -> Any: @@ -292,7 +293,7 @@ async def get_and_put(self, key, value, key_hint=None, value_hint=None) -> Any: key_hint = AnyDataObject.map_python_type(key) conn = await self._get_best_node(key, key_hint) - result = await cache_get_and_put_async(conn, self._cache_id, key, value, key_hint, value_hint) + result = await cache_get_and_put_async(conn, self.cache_info, key, value, key_hint, value_hint) result.value = await self.client.unwrap_binary(result.value) return result @@ -315,7 +316,7 @@ async def get_and_put_if_absent(self, key, value, key_hint=None, value_hint=None key_hint = AnyDataObject.map_python_type(key) conn = await self._get_best_node(key, key_hint) - result = await cache_get_and_put_if_absent_async(conn, self._cache_id, key, value, key_hint, value_hint) + result = await cache_get_and_put_if_absent_async(conn, self.cache_info, key, value, key_hint, value_hint) result.value = await self.client.unwrap_binary(result.value) return result @@ -336,7 +337,7 @@ async def put_if_absent(self, key, value, key_hint=None, value_hint=None): key_hint = AnyDataObject.map_python_type(key) conn = await self._get_best_node(key, key_hint) - return await cache_put_if_absent_async(conn, self._cache_id, key, value, key_hint, value_hint) + return await cache_put_if_absent_async(conn, self.cache_info, key, value, key_hint, value_hint) @status_to_exception(CacheError) async def get_and_remove(self, key, key_hint=None) -> Any: @@ -352,7 +353,7 @@ async def get_and_remove(self, key, key_hint=None) -> Any: key_hint = AnyDataObject.map_python_type(key) conn = await self._get_best_node(key, key_hint) - result = await cache_get_and_remove_async(conn, self._cache_id, key, key_hint) + result = await 
cache_get_and_remove_async(conn, self.cache_info, key, key_hint) result.value = await self.client.unwrap_binary(result.value) return result @@ -375,7 +376,7 @@ async def get_and_replace(self, key, value, key_hint=None, value_hint=None) -> A key_hint = AnyDataObject.map_python_type(key) conn = await self._get_best_node(key, key_hint) - result = await cache_get_and_replace_async(conn, self._cache_id, key, value, key_hint, value_hint) + result = await cache_get_and_replace_async(conn, self.cache_info, key, value, key_hint, value_hint) result.value = await self.client.unwrap_binary(result.value) return result @@ -392,7 +393,7 @@ async def remove_key(self, key, key_hint=None): key_hint = AnyDataObject.map_python_type(key) conn = await self._get_best_node(key, key_hint) - return await cache_remove_key_async(conn, self._cache_id, key, key_hint) + return await cache_remove_key_async(conn, self.cache_info, key, key_hint) @status_to_exception(CacheError) async def remove_keys(self, keys: list): @@ -403,7 +404,7 @@ async def remove_keys(self, keys: list): :param keys: list of keys or tuples of (key, key_hint) to remove. """ conn = await self._get_best_node() - return await cache_remove_keys_async(conn, self._cache_id, keys) + return await cache_remove_keys_async(conn, self.cache_info, keys) @status_to_exception(CacheError) async def remove_all(self): @@ -411,7 +412,7 @@ async def remove_all(self): Removes all cache entries, notifying listeners and cache writers. 
""" conn = await self._get_best_node() - return await cache_remove_all_async(conn, self._cache_id) + return await cache_remove_all_async(conn, self.cache_info) @status_to_exception(CacheError) async def remove_if_equals(self, key, sample, key_hint=None, sample_hint=None): @@ -430,7 +431,7 @@ async def remove_if_equals(self, key, sample, key_hint=None, sample_hint=None): key_hint = AnyDataObject.map_python_type(key) conn = await self._get_best_node(key, key_hint) - return await cache_remove_if_equals_async(conn, self._cache_id, key, sample, key_hint, sample_hint) + return await cache_remove_if_equals_async(conn, self.cache_info, key, sample, key_hint, sample_hint) @status_to_exception(CacheError) async def replace_if_equals(self, key, sample, value, key_hint=None, sample_hint=None, value_hint=None) -> Any: @@ -453,7 +454,7 @@ async def replace_if_equals(self, key, sample, value, key_hint=None, sample_hint key_hint = AnyDataObject.map_python_type(key) conn = await self._get_best_node(key, key_hint) - result = await cache_replace_if_equals_async(conn, self._cache_id, key, sample, value, key_hint, sample_hint, + result = await cache_replace_if_equals_async(conn, self.cache_info, key, sample, value, key_hint, sample_hint, value_hint) result.value = await self.client.unwrap_binary(result.value) return result @@ -469,7 +470,7 @@ async def get_size(self, peek_modes=None): :return: integer number of cache entries. """ conn = await self._get_best_node() - return await cache_get_size_async(conn, self._cache_id, peek_modes) + return await cache_get_size_async(conn, self.cache_info, peek_modes) def scan(self, page_size: int = 1, partitions: int = -1, local: bool = False) -> AioScanCursor: """ @@ -484,4 +485,4 @@ def scan(self, page_size: int = 1, partitions: int = -1, local: bool = False) -> on local node only. 
Defaults to False, :return: async scan query cursor """ - return AioScanCursor(self.client, self._cache_id, page_size, partitions, local) + return AioScanCursor(self.client, self.cache_info, page_size, partitions, local) diff --git a/pyignite/aio_client.py b/pyignite/aio_client.py index 7a5959d..40fd722 100644 --- a/pyignite/aio_client.py +++ b/pyignite/aio_client.py @@ -29,6 +29,7 @@ from .constants import AFFINITY_RETRIES, AFFINITY_DELAY from .datatypes import BinaryObject from .exceptions import BinaryTypeError, CacheError, ReconnectError, connection_errors +from .queries.query import CacheInfo from .stream import AioBinaryStream, READ_BACKWARD from .utils import cache_id, entity_id, status_to_exception, is_wrapped @@ -452,13 +453,17 @@ def sql( :return: async sql fields cursor with result rows as a lists. If `include_field_names` was set, the first row will hold field names. """ + if isinstance(cache, (int, str)): + c_info = CacheInfo(cache_id=cache_id(cache), protocol_context=self.protocol_context) + elif isinstance(cache, AioCache): + c_info = cache.cache_info + else: + c_info = None - c_id = cache.cache_id if isinstance(cache, AioCache) else cache_id(cache) - - if c_id != 0: + if c_info: schema = None - return AioSqlFieldsCursor(self, c_id, query_str, page_size, query_args, schema, statement_type, + return AioSqlFieldsCursor(self, c_info, query_str, page_size, query_args, schema, statement_type, distributed_joins, local, replicated_only, enforce_join_order, collocated, lazy, include_field_names, max_rows, timeout) diff --git a/pyignite/aio_cluster.py b/pyignite/aio_cluster.py index 6d76125..08d1f34 100644 --- a/pyignite/aio_cluster.py +++ b/pyignite/aio_cluster.py @@ -18,6 +18,7 @@ whole cluster asynchronously. 
""" from pyignite.api.cluster import cluster_get_state_async, cluster_set_state_async +from pyignite.datatypes import ClusterState from pyignite.exceptions import ClusterError from pyignite.utils import status_to_exception @@ -33,7 +34,7 @@ def __init__(self, client: 'AioClient'): self._client = client @status_to_exception(ClusterError) - async def get_state(self): + async def get_state(self) -> 'ClusterState': """ Gets current cluster state. @@ -43,7 +44,7 @@ async def get_state(self): return await cluster_get_state_async(await self._client.random_node()) @status_to_exception(ClusterError) - async def set_state(self, state): + async def set_state(self, state: 'ClusterState'): """ Changes current cluster state to the given. diff --git a/pyignite/api/cache_config.py b/pyignite/api/cache_config.py index 0adb549..511c18d 100644 --- a/pyignite/api/cache_config.py +++ b/pyignite/api/cache_config.py @@ -26,9 +26,9 @@ from typing import Union from pyignite.connection import Connection, AioConnection -from pyignite.datatypes.cache_config import cache_config_struct +from pyignite.datatypes.cache_config import get_cache_config_struct from pyignite.datatypes.cache_properties import prop_map -from pyignite.datatypes import Int, Byte, prop_codes, Short, String, StringArray +from pyignite.datatypes import Int, prop_codes, Short, String, StringArray from pyignite.queries import Query, ConfigQuery, query_perform from pyignite.queries.op_codes import ( OP_CACHE_GET_CONFIGURATION, OP_CACHE_CREATE_WITH_NAME, OP_CACHE_GET_OR_CREATE_WITH_NAME, OP_CACHE_DESTROY, @@ -37,6 +37,9 @@ from pyignite.utils import cache_id from .result import APIResult +from ..datatypes.prop_codes import PROP_EXPIRY_POLICY +from ..exceptions import NotSupportedByClusterError +from ..queries.query import CacheInfo def compact_cache_config(cache_config: dict) -> dict: @@ -57,29 +60,26 @@ def compact_cache_config(cache_config: dict) -> dict: return result -def cache_get_configuration(connection: 'Connection', 
cache: Union[str, int], - flags: int = 0, query_id=None) -> 'APIResult': +def cache_get_configuration(connection: 'Connection', cache: CacheInfo, query_id=None) -> 'APIResult': """ Gets configuration for the given cache. :param connection: connection to Ignite server, - :param cache: name or ID of the cache, - :param flags: Ignite documentation is unclear on this subject, + :param cache: Cache meta info, :param query_id: (optional) a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value is generated, :return: API result data object. Result value is OrderedDict with the cache configuration parameters. """ - return __cache_get_configuration(connection, cache, flags, query_id) + return __cache_get_configuration(connection, cache, query_id) -async def cache_get_configuration_async(connection: 'AioConnection', cache: Union[str, int], - flags: int = 0, query_id=None) -> 'APIResult': +async def cache_get_configuration_async(connection: 'AioConnection', cache: CacheInfo, query_id=None) -> 'APIResult': """ Async version of cache_get_configuration. 
""" - return await __cache_get_configuration(connection, cache, flags, query_id) + return await __cache_get_configuration(connection, cache, query_id) def __post_process_cache_config(result): @@ -88,22 +88,20 @@ def __post_process_cache_config(result): return result -def __cache_get_configuration(connection, cache, flags, query_id): +def __cache_get_configuration(connection, cache, query_id): query_struct = Query( OP_CACHE_GET_CONFIGURATION, [ - ('hash_code', Int), - ('flags', Byte), + ('cache_info', CacheInfo) ], query_id=query_id, ) return query_perform(query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flags': flags + 'cache_info': cache }, response_config=[ - ('cache_config', cache_config_struct) + ('cache_config', get_cache_config_struct(connection.protocol_context)) ], post_process_fun=__post_process_cache_config ) @@ -184,9 +182,9 @@ async def cache_destroy_async(connection: 'AioConnection', cache: Union[str, int def __cache_destroy(connection, cache, query_id): - query_struct = Query(OP_CACHE_DESTROY, [('hash_code', Int)], query_id=query_id) + query_struct = Query(OP_CACHE_DESTROY, [('cache_id', Int)], query_id=query_id) - return query_perform(query_struct, connection, query_params={'hash_code': cache_id(cache)}) + return query_perform(query_struct, connection, query_params={'cache_id': cache_id(cache)}) def cache_get_names(connection: 'Connection', query_id=None) -> 'APIResult': @@ -278,8 +276,12 @@ async def cache_get_or_create_with_config_async(connection: 'AioConnection', cac def __cache_create_with_config(op_code, connection, cache_props, query_id): prop_types, prop_values = {}, {} + is_expiry_policy_supported = connection.protocol_context.is_expiry_policy_supported() for i, prop_item in enumerate(cache_props.items()): prop_code, prop_value = prop_item + if prop_code == PROP_EXPIRY_POLICY and not is_expiry_policy_supported: + raise NotSupportedByClusterError("'ExpiryPolicy' API is not supported by the cluster") + prop_name = 
'property_{}'.format(i) prop_types[prop_name] = prop_map(prop_code) prop_values[prop_name] = prop_value diff --git a/pyignite/api/key_value.py b/pyignite/api/key_value.py index 9fb13bb..6fe8183 100644 --- a/pyignite/api/key_value.py +++ b/pyignite/api/key_value.py @@ -23,16 +23,16 @@ OP_CACHE_CLEAR_KEYS, OP_CACHE_REMOVE_KEY, OP_CACHE_REMOVE_IF_EQUALS, OP_CACHE_REMOVE_KEYS, OP_CACHE_REMOVE_ALL, OP_CACHE_GET_SIZE, OP_CACHE_LOCAL_PEEK ) -from pyignite.datatypes import Map, Bool, Byte, Int, Long, AnyDataArray, AnyDataObject, ByteArray +from pyignite.datatypes import Map, Bool, Long, AnyDataArray, AnyDataObject, ByteArray from pyignite.datatypes.base import IgniteDataType from pyignite.queries import Query, query_perform -from pyignite.utils import cache_id from .result import APIResult +from ..queries.query import CacheInfo -def cache_put(connection: 'Connection', cache: Union[str, int], key: Any, value: Any, - key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, binary: bool = False, +def cache_put(connection: 'Connection', cache: CacheInfo, key: Any, value: Any, + key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, query_id: Optional[int] = None) -> 'APIResult': """ Puts a value with a given key to cache (overwriting existing value if any). @@ -45,32 +45,29 @@ def cache_put(connection: 'Connection', cache: Union[str, int], key: Any, value: should be converted, :param value_hint: (optional) Ignite data type, for which the given value should be converted. - :param binary: (optional) pass True to keep the value in binary form. - False by default, :param query_id: (optional) a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value is generated, :return: API result data object. Contains zero status if a value is written, non-zero status and an error description otherwise. 
""" - return __cache_put(connection, cache, key, value, key_hint, value_hint, binary, query_id) + return __cache_put(connection, cache, key, value, key_hint, value_hint, query_id) -async def cache_put_async(connection: 'AioConnection', cache: Union[str, int], key: Any, value: Any, - key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, binary: bool = False, +async def cache_put_async(connection: 'AioConnection', cache: CacheInfo, key: Any, value: Any, + key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, query_id: Optional[int] = None) -> 'APIResult': """ Async version of cache_put """ - return await __cache_put(connection, cache, key, value, key_hint, value_hint, binary, query_id) + return await __cache_put(connection, cache, key, value, key_hint, value_hint, query_id) -def __cache_put(connection, cache, key, value, key_hint, value_hint, binary, query_id): +def __cache_put(connection, cache, key, value, key_hint, value_hint, query_id): query_struct = Query( OP_CACHE_PUT, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('key', key_hint or AnyDataObject), ('value', value_hint or AnyDataObject), ], @@ -79,16 +76,15 @@ def __cache_put(connection, cache, key, value, key_hint, value_hint, binary, que return query_perform( query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache, 'key': key, 'value': value } ) -def cache_get(connection: 'Connection', cache: Union[str, int], key: Any, key_hint: 'IgniteDataType' = None, - binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': +def cache_get(connection: 'Connection', cache: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None, + query_id: Optional[int] = None) -> 'APIResult': """ Retrieves a value from cache by key. @@ -97,32 +93,28 @@ def cache_get(connection: 'Connection', cache: Union[str, int], key: Any, key_hi :param key: key for the cache entry. 
Can be of any supported type, :param key_hint: (optional) Ignite data type, for which the given key should be converted, - :param binary: (optional) pass True to keep the value in binary form. - False by default, :param query_id: (optional) a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value is generated, :return: API result data object. Contains zero status and a value retrieved on success, non-zero status and an error description on failure. """ - return __cache_get(connection, cache, key, key_hint, binary, query_id) + return __cache_get(connection, cache, key, key_hint, query_id) -async def cache_get_async(connection: 'AioConnection', cache: Union[str, int], key: Any, - key_hint: 'IgniteDataType' = None, binary: bool = False, - query_id: Optional[int] = None) -> 'APIResult': +async def cache_get_async(connection: 'AioConnection', cache: CacheInfo, key: Any, + key_hint: 'IgniteDataType' = None, query_id: Optional[int] = None) -> 'APIResult': """ Async version of cache_get """ - return await __cache_get(connection, cache, key, key_hint, binary, query_id) + return await __cache_get(connection, cache, key, key_hint, query_id) -def __cache_get(connection, cache, key, key_hint, binary, query_id): +def __cache_get(connection, cache, key, key_hint, query_id): query_struct = Query( OP_CACHE_GET, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('key', key_hint or AnyDataObject), ], query_id=query_id, @@ -130,8 +122,7 @@ def __cache_get(connection, cache, key, key_hint, binary, query_id): return query_perform( query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache, 'key': key, }, response_config=[ @@ -141,7 +132,7 @@ def __cache_get(connection, cache, key, key_hint, binary, query_id): ) -def cache_get_all(connection: 'Connection', cache: Union[str, int], keys: Iterable, binary: bool = False, +def 
cache_get_all(connection: 'Connection', cache: CacheInfo, keys: Iterable, query_id: Optional[int] = None) -> 'APIResult': """ Retrieves multiple key-value pairs from cache. @@ -149,8 +140,6 @@ def cache_get_all(connection: 'Connection', cache: Union[str, int], keys: Iterab :param connection: connection to Ignite server, :param cache: name or ID of the cache, :param keys: list of keys or tuples of (key, key_hint), - :param binary: (optional) pass True to keep the value in binary form. - False by default, :param query_id: (optional) a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value is generated, @@ -158,23 +147,22 @@ def cache_get_all(connection: 'Connection', cache: Union[str, int], keys: Iterab retrieved key-value pairs, non-zero status and an error description on failure. """ - return __cache_get_all(connection, cache, keys, binary, query_id) + return __cache_get_all(connection, cache, keys, query_id) -async def cache_get_all_async(connection: 'AioConnection', cache: Union[str, int], keys: Iterable, binary: bool = False, +async def cache_get_all_async(connection: 'AioConnection', cache: CacheInfo, keys: Iterable, query_id: Optional[int] = None) -> 'APIResult': """ Async version of cache_get_all. 
""" - return await __cache_get_all(connection, cache, keys, binary, query_id) + return await __cache_get_all(connection, cache, keys, query_id) -def __cache_get_all(connection, cache, keys, binary, query_id): +def __cache_get_all(connection, cache, keys, query_id): query_struct = Query( OP_CACHE_GET_ALL, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('keys', AnyDataArray()), ], query_id=query_id, @@ -182,8 +170,7 @@ def __cache_get_all(connection, cache, keys, binary, query_id): return query_perform( query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache, 'keys': keys, }, response_config=[ @@ -193,7 +180,7 @@ def __cache_get_all(connection, cache, keys, binary, query_id): ) -def cache_put_all(connection: 'Connection', cache: Union[str, int], pairs: dict, binary: bool = False, +def cache_put_all(connection: 'Connection', cache: CacheInfo, pairs: dict, query_id: Optional[int] = None) -> 'APIResult': """ Puts multiple key-value pairs to cache (overwriting existing associations @@ -204,31 +191,28 @@ def cache_put_all(connection: 'Connection', cache: Union[str, int], pairs: dict, :param pairs: dictionary type parameters, contains key-value pairs to save. Each key or value can be an item of representable Python type or a tuple of (item, hint), - :param binary: (optional) pass True to keep the value in binary form. - False by default, :param query_id: (optional) a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value is generated, :return: API result data object. Contains zero status if key-value pairs are written, non-zero status and an error description otherwise. 
""" - return __cache_put_all(connection, cache, pairs, binary, query_id) + return __cache_put_all(connection, cache, pairs, query_id) -async def cache_put_all_async(connection: 'AioConnection', cache: Union[str, int], pairs: dict, binary: bool = False, +async def cache_put_all_async(connection: 'AioConnection', cache: CacheInfo, pairs: dict, query_id: Optional[int] = None) -> 'APIResult': """ Async version of cache_put_all. """ - return await __cache_put_all(connection, cache, pairs, binary, query_id) + return await __cache_put_all(connection, cache, pairs, query_id) -def __cache_put_all(connection, cache, pairs, binary, query_id): +def __cache_put_all(connection, cache, pairs, query_id): query_struct = Query( OP_CACHE_PUT_ALL, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('data', Map), ], query_id=query_id, @@ -236,15 +220,14 @@ def __cache_put_all(connection, cache, pairs, binary, query_id): return query_perform( query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache, 'data': pairs, }, ) -def cache_contains_key(connection: 'Connection', cache: Union[str, int], key: Any, key_hint: 'IgniteDataType' = None, - binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': +def cache_contains_key(connection: 'Connection', cache: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None, + query_id: Optional[int] = None) -> 'APIResult': """ Returns a value indicating whether given key is present in cache. @@ -253,8 +236,6 @@ def cache_contains_key(connection: 'Connection', cache: Union[str, int], key: An :param key: key for the cache entry. Can be of any supported type, :param key_hint: (optional) Ignite data type, for which the given key should be converted, - :param binary: pass True to keep the value in binary form. False - by default, :param query_id: a value generated by client and returned as-is in response.query_id. 
When the parameter is omitted, a random value is generated, @@ -262,24 +243,22 @@ def cache_contains_key(connection: 'Connection', cache: Union[str, int], key: An retrieved on success: `True` when key is present, `False` otherwise, non-zero status and an error description on failure. """ - return __cache_contains_key(connection, cache, key, key_hint, binary, query_id) + return __cache_contains_key(connection, cache, key, key_hint, query_id) -async def cache_contains_key_async(connection: 'AioConnection', cache: Union[str, int], key: Any, - key_hint: 'IgniteDataType' = None, binary: bool = False, - query_id: Optional[int] = None) -> 'APIResult': +async def cache_contains_key_async(connection: 'AioConnection', cache: CacheInfo, key: Any, + key_hint: 'IgniteDataType' = None, query_id: Optional[int] = None) -> 'APIResult': """ Async version of cache_contains_key. """ - return await __cache_contains_key(connection, cache, key, key_hint, binary, query_id) + return await __cache_contains_key(connection, cache, key, key_hint, query_id) -def __cache_contains_key(connection, cache, key, key_hint, binary, query_id): +def __cache_contains_key(connection, cache, key, key_hint, query_id): query_struct = Query( OP_CACHE_CONTAINS_KEY, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('key', key_hint or AnyDataObject), ], query_id=query_id, @@ -287,8 +266,7 @@ def __cache_contains_key(connection, cache, key, key_hint, binary, query_id): return query_perform( query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache, 'key': key, }, response_config=[ @@ -298,7 +276,7 @@ def __cache_contains_key(connection, cache, key, key_hint, binary, query_id): ) -def cache_contains_keys(connection: 'Connection', cache: Union[str, int], keys: Iterable, binary: bool = False, +def cache_contains_keys(connection: 'Connection', cache: CacheInfo, keys: Iterable, query_id: Optional[int] = None) -> 'APIResult': """ 
Returns a value indicating whether all given keys are present in cache. @@ -306,8 +284,6 @@ def cache_contains_keys(connection: 'Connection', cache: Union[str, int], keys: :param connection: connection to Ignite server, :param cache: name or ID of the cache, :param keys: a list of keys or (key, type hint) tuples, - :param binary: pass True to keep the value in binary form. False - by default, :param query_id: a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value is generated, @@ -315,23 +291,22 @@ def cache_contains_keys(connection: 'Connection', cache: Union[str, int], keys: retrieved on success: `True` when all keys are present, `False` otherwise, non-zero status and an error description on failure. """ - return __cache_contains_keys(connection, cache, keys, binary, query_id) + return __cache_contains_keys(connection, cache, keys, query_id) -async def cache_contains_keys_async(connection: 'AioConnection', cache: Union[str, int], keys: Iterable, - binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': +async def cache_contains_keys_async(connection: 'AioConnection', cache: CacheInfo, keys: Iterable, + query_id: Optional[int] = None) -> 'APIResult': """ Async version of cache_contains_keys. 
""" - return await __cache_contains_keys(connection, cache, keys, binary, query_id) + return await __cache_contains_keys(connection, cache, keys, query_id) -def __cache_contains_keys(connection, cache, keys, binary, query_id): +def __cache_contains_keys(connection, cache, keys, query_id): query_struct = Query( OP_CACHE_CONTAINS_KEYS, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('keys', AnyDataArray()), ], query_id=query_id, @@ -339,8 +314,7 @@ def __cache_contains_keys(connection, cache, keys, binary, query_id): return query_perform( query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache, 'keys': keys, }, response_config=[ @@ -350,8 +324,8 @@ def __cache_contains_keys(connection, cache, keys, binary, query_id): ) -def cache_get_and_put(connection: 'Connection', cache: Union[str, int], key: Any, value: Any, - key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, binary: bool = False, +def cache_get_and_put(connection: 'Connection', cache: CacheInfo, key: Any, value: Any, + key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, query_id: Optional[int] = None) -> 'APIResult': """ Puts a value with a given key to cache, and returns the previous value @@ -365,8 +339,6 @@ def cache_get_and_put(connection: 'Connection', cache: Union[str, int], key: Any should be converted, :param value_hint: (optional) Ignite data type, for which the given value should be converted. - :param binary: pass True to keep the value in binary form. False - by default, :param query_id: a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value is generated, @@ -374,24 +346,23 @@ def cache_get_and_put(connection: 'Connection', cache: Union[str, int], key: Any or None if a value is written, non-zero status and an error description in case of error. 
""" - return __cache_get_and_put(connection, cache, key, value, key_hint, value_hint, binary, query_id) + return __cache_get_and_put(connection, cache, key, value, key_hint, value_hint, query_id) -async def cache_get_and_put_async(connection: 'AioConnection', cache: Union[str, int], key: Any, value: Any, +async def cache_get_and_put_async(connection: 'AioConnection', cache: CacheInfo, key: Any, value: Any, key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, - binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': + query_id: Optional[int] = None) -> 'APIResult': """ Async version of cache_get_and_put. """ - return await __cache_get_and_put(connection, cache, key, value, key_hint, value_hint, binary, query_id) + return await __cache_get_and_put(connection, cache, key, value, key_hint, value_hint, query_id) -def __cache_get_and_put(connection, cache, key, value, key_hint, value_hint, binary, query_id): +def __cache_get_and_put(connection, cache, key, value, key_hint, value_hint, query_id): query_struct = Query( OP_CACHE_GET_AND_PUT, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('key', key_hint or AnyDataObject), ('value', value_hint or AnyDataObject), ], @@ -400,8 +371,7 @@ def __cache_get_and_put(connection, cache, key, value, key_hint, value_hint, bin return query_perform( query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache, 'key': key, 'value': value, }, @@ -412,8 +382,8 @@ def __cache_get_and_put(connection, cache, key, value, key_hint, value_hint, bin ) -def cache_get_and_replace(connection: 'Connection', cache: Union[str, int], key: Any, value: Any, - key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, binary: bool = False, +def cache_get_and_replace(connection: 'Connection', cache: CacheInfo, key: Any, value: Any, + key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, query_id: 
Optional[int] = None) -> 'APIResult': """ Puts a value with a given key to cache, returning previous value @@ -428,31 +398,28 @@ def cache_get_and_replace(connection: 'Connection', cache: Union[str, int], key: should be converted, :param value_hint: (optional) Ignite data type, for which the given value should be converted. - :param binary: pass True to keep the value in binary form. False - by default, :param query_id: a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value is generated, :return: API result data object. Contains zero status and an old value or None on success, non-zero status and an error description otherwise. """ - return __cache_get_and_replace(connection, cache, key, key_hint, value, value_hint, binary, query_id) + return __cache_get_and_replace(connection, cache, key, key_hint, value, value_hint, query_id) -async def cache_get_and_replace_async(connection: 'AioConnection', cache: Union[str, int], key: Any, value: Any, +async def cache_get_and_replace_async(connection: 'AioConnection', cache: CacheInfo, key: Any, value: Any, key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, - binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': + query_id: Optional[int] = None) -> 'APIResult': """ Async version of cache_get_and_replace. 
""" - return await __cache_get_and_replace(connection, cache, key, key_hint, value, value_hint, binary, query_id) + return await __cache_get_and_replace(connection, cache, key, key_hint, value, value_hint, query_id) -def __cache_get_and_replace(connection, cache, key, key_hint, value, value_hint, binary, query_id): +def __cache_get_and_replace(connection, cache, key, key_hint, value, value_hint, query_id): query_struct = Query( OP_CACHE_GET_AND_REPLACE, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('key', key_hint or AnyDataObject), ('value', value_hint or AnyDataObject), ], @@ -461,8 +428,7 @@ def __cache_get_and_replace(connection, cache, key, key_hint, value, value_hint, return query_perform( query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache, 'key': key, 'value': value, }, @@ -473,8 +439,8 @@ def __cache_get_and_replace(connection, cache, key, key_hint, value, value_hint, ) -def cache_get_and_remove(connection: 'Connection', cache: Union[str, int], key: Any, key_hint: 'IgniteDataType' = None, - binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': +def cache_get_and_remove(connection: 'Connection', cache: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None, + query_id: Optional[int] = None) -> 'APIResult': """ Removes the cache entry with specified key, returning the value. @@ -483,28 +449,25 @@ def cache_get_and_remove(connection: 'Connection', cache: Union[str, int], key: :param key: key for the cache entry. Can be of any supported type, :param key_hint: (optional) Ignite data type, for which the given key should be converted, - :param binary: pass True to keep the value in binary form. False - by default, :param query_id: a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value is generated, :return: API result data object. 
Contains zero status and an old value or None, non-zero status and an error description otherwise. """ - return __cache_get_and_remove(connection, cache, key, key_hint, binary, query_id) + return __cache_get_and_remove(connection, cache, key, key_hint, query_id) -async def cache_get_and_remove_async(connection: 'AioConnection', cache: Union[str, int], key: Any, - key_hint: 'IgniteDataType' = None, binary: bool = False, - query_id: Optional[int] = None) -> 'APIResult': - return await __cache_get_and_remove(connection, cache, key, key_hint, binary, query_id) +async def cache_get_and_remove_async( + connection: 'AioConnection', cache: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None, + query_id: Optional[int] = None) -> 'APIResult': + return await __cache_get_and_remove(connection, cache, key, key_hint, query_id) -def __cache_get_and_remove(connection, cache, key, key_hint, binary, query_id): +def __cache_get_and_remove(connection, cache, key, key_hint, query_id): query_struct = Query( OP_CACHE_GET_AND_REMOVE, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('key', key_hint or AnyDataObject), ], query_id=query_id, @@ -512,8 +475,7 @@ def __cache_get_and_remove(connection, cache, key, key_hint, binary, query_id): return query_perform( query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache, 'key': key, }, response_config=[ @@ -523,9 +485,9 @@ def __cache_get_and_remove(connection, cache, key, key_hint, binary, query_id): ) -def cache_put_if_absent(connection: 'Connection', cache: Union[str, int], key: Any, value: Any, +def cache_put_if_absent(connection: 'Connection', cache: CacheInfo, key: Any, value: Any, key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, - binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': + query_id: Optional[int] = None) -> 'APIResult': """ Puts a value with a given key to cache only if the key does not already 
exist. @@ -538,32 +500,29 @@ def cache_put_if_absent(connection: 'Connection', cache: Union[str, int], key: A should be converted, :param value_hint: (optional) Ignite data type, for which the given value should be converted. - :param binary: (optional) pass True to keep the value in binary form. False - by default, :param query_id: (optional) a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value is generated, :return: API result data object. Contains zero status on success, non-zero status and an error description otherwise. """ - return __cache_put_if_absent(connection, cache, key, value, key_hint, value_hint, binary, query_id) + return __cache_put_if_absent(connection, cache, key, value, key_hint, value_hint, query_id) -async def cache_put_if_absent_async(connection: 'AioConnection', cache: Union[str, int], key: Any, value: Any, +async def cache_put_if_absent_async(connection: 'AioConnection', cache: CacheInfo, key: Any, value: Any, key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, - binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': + query_id: Optional[int] = None) -> 'APIResult': """ Async version of cache_put_if_absent. 
""" - return await __cache_put_if_absent(connection, cache, key, value, key_hint, value_hint, binary, query_id) + return await __cache_put_if_absent(connection, cache, key, value, key_hint, value_hint, query_id) -def __cache_put_if_absent(connection, cache, key, value, key_hint, value_hint, binary, query_id): +def __cache_put_if_absent(connection, cache, key, value, key_hint, value_hint, query_id): query_struct = Query( OP_CACHE_PUT_IF_ABSENT, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('key', key_hint or AnyDataObject), ('value', value_hint or AnyDataObject), ], @@ -572,8 +531,7 @@ def __cache_put_if_absent(connection, cache, key, value, key_hint, value_hint, b return query_perform( query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache, 'key': key, 'value': value, }, @@ -584,9 +542,9 @@ def __cache_put_if_absent(connection, cache, key, value, key_hint, value_hint, b ) -def cache_get_and_put_if_absent(connection: 'Connection', cache: Union[str, int], key: Any, value: Any, +def cache_get_and_put_if_absent(connection: 'Connection', cache: CacheInfo, key: Any, value: Any, key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, - binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': + query_id: Optional[int] = None) -> 'APIResult': """ Puts a value with a given key to cache only if the key does not already exist. @@ -599,32 +557,29 @@ def cache_get_and_put_if_absent(connection: 'Connection', cache: Union[str, int] should be converted, :param value_hint: (optional) Ignite data type, for which the given value should be converted. - :param binary: (optional) pass True to keep the value in binary form. False - by default, :param query_id: (optional) a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value is generated, :return: API result data object. 
Contains zero status and an old value or None on success, non-zero status and an error description otherwise. """ - return __cache_get_and_put_if_absent(connection, cache, key, value, key_hint, value_hint, binary, query_id) + return __cache_get_and_put_if_absent(connection, cache, key, value, key_hint, value_hint, query_id) -async def cache_get_and_put_if_absent_async(connection: 'AioConnection', cache: Union[str, int], key: Any, value: Any, +async def cache_get_and_put_if_absent_async(connection: 'AioConnection', cache: CacheInfo, key: Any, value: Any, key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, - binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': + query_id: Optional[int] = None) -> 'APIResult': """ Async version of cache_get_and_put_if_absent. """ - return await __cache_get_and_put_if_absent(connection, cache, key, value, key_hint, value_hint, binary, query_id) + return await __cache_get_and_put_if_absent(connection, cache, key, value, key_hint, value_hint, query_id) -def __cache_get_and_put_if_absent(connection, cache, key, value, key_hint, value_hint, binary, query_id): +def __cache_get_and_put_if_absent(connection, cache, key, value, key_hint, value_hint, query_id): query_struct = Query( OP_CACHE_GET_AND_PUT_IF_ABSENT, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('key', key_hint or AnyDataObject), ('value', value_hint or AnyDataObject), ], @@ -633,8 +588,7 @@ def __cache_get_and_put_if_absent(connection, cache, key, value, key_hint, value return query_perform( query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache, 'key': key, 'value': value, }, @@ -645,8 +599,8 @@ def __cache_get_and_put_if_absent(connection, cache, key, value, key_hint, value ) -def cache_replace(connection: 'Connection', cache: Union[str, int], key: Any, value: Any, - key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, binary: 
bool = False, +def cache_replace(connection: 'Connection', cache: CacheInfo, key: Any, value: Any, + key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, query_id: Optional[int] = None) -> 'APIResult': """ Puts a value with a given key to cache only if the key already exist. @@ -659,8 +613,6 @@ def cache_replace(connection: 'Connection', cache: Union[str, int], key: Any, va should be converted, :param value_hint: (optional) Ignite data type, for which the given value should be converted. - :param binary: pass True to keep the value in binary form. False - by default, :param query_id: a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value is generated, @@ -668,24 +620,23 @@ def cache_replace(connection: 'Connection', cache: Union[str, int], key: Any, va success code, or non-zero status and an error description if something has gone wrong. """ - return __cache_replace(connection, cache, key, value, key_hint, value_hint, binary, query_id) + return __cache_replace(connection, cache, key, value, key_hint, value_hint, query_id) -async def cache_replace_async(connection: 'AioConnection', cache: Union[str, int], key: Any, value: Any, +async def cache_replace_async(connection: 'AioConnection', cache: CacheInfo, key: Any, value: Any, key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, - binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': + query_id: Optional[int] = None) -> 'APIResult': """ Async version of cache_replace. 
""" - return await __cache_replace(connection, cache, key, value, key_hint, value_hint, binary, query_id) + return await __cache_replace(connection, cache, key, value, key_hint, value_hint, query_id) -def __cache_replace(connection, cache, key, value, key_hint, value_hint, binary, query_id): +def __cache_replace(connection, cache, key, value, key_hint, value_hint, query_id): query_struct = Query( OP_CACHE_REPLACE, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('key', key_hint or AnyDataObject), ('value', value_hint or AnyDataObject), ], @@ -694,8 +645,7 @@ def __cache_replace(connection, cache, key, value, key_hint, value_hint, binary, return query_perform( query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache, 'key': key, 'value': value, }, @@ -706,10 +656,9 @@ def __cache_replace(connection, cache, key, value, key_hint, value_hint, binary, ) -def cache_replace_if_equals(connection: 'Connection', cache: Union[str, int], key: Any, sample: Any, value: Any, +def cache_replace_if_equals(connection: 'Connection', cache: CacheInfo, key: Any, sample: Any, value: Any, key_hint: 'IgniteDataType' = None, sample_hint: 'IgniteDataType' = None, - value_hint: 'IgniteDataType' = None, binary: bool = False, - query_id: Optional[int] = None) -> 'APIResult': + value_hint: 'IgniteDataType' = None, query_id: Optional[int] = None) -> 'APIResult': """ Puts a value with a given key to cache only if the key already exists and value equals provided sample. @@ -725,8 +674,6 @@ def cache_replace_if_equals(connection: 'Connection', cache: Union[str, int], ke the given sample should be converted :param value_hint: (optional) Ignite data type, for which the given value should be converted, - :param binary: (optional) pass True to keep the value in binary form. - False by default, :param query_id: (optional) a value generated by client and returned as-is in response.query_id. 
When the parameter is omitted, a random value is generated, @@ -734,28 +681,26 @@ def cache_replace_if_equals(connection: 'Connection', cache: Union[str, int], ke success code, or non-zero status and an error description if something has gone wrong. """ - return __cache_replace_if_equals(connection, cache, key, sample, value, key_hint, sample_hint, value_hint, binary, - query_id) + return __cache_replace_if_equals(connection, cache, key, sample, value, key_hint, + sample_hint, value_hint, query_id) async def cache_replace_if_equals_async( - connection: 'AioConnection', cache: Union[str, int], key: Any, sample: Any, value: Any, + connection: 'AioConnection', cache: CacheInfo, key: Any, sample: Any, value: Any, key_hint: 'IgniteDataType' = None, sample_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, - binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': + query_id: Optional[int] = None) -> 'APIResult': """ Async version of cache_replace_if_equals. """ - return await __cache_replace_if_equals(connection, cache, key, sample, value, key_hint, sample_hint, value_hint, - binary, query_id) + return await __cache_replace_if_equals(connection, cache, key, sample, value, key_hint, + sample_hint, value_hint, query_id) -def __cache_replace_if_equals(connection, cache, key, sample, value, key_hint, sample_hint, value_hint, binary, - query_id): +def __cache_replace_if_equals(connection, cache, key, sample, value, key_hint, sample_hint, value_hint, query_id): query_struct = Query( OP_CACHE_REPLACE_IF_EQUALS, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('key', key_hint or AnyDataObject), ('sample', sample_hint or AnyDataObject), ('value', value_hint or AnyDataObject), @@ -765,8 +710,7 @@ def __cache_replace_if_equals(connection, cache, key, sample, value, key_hint, s return query_perform( query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache, 'key': 
key, 'sample': sample, 'value': value, @@ -778,52 +722,47 @@ def __cache_replace_if_equals(connection, cache, key, sample, value, key_hint, s ) -def cache_clear(connection: 'Connection', cache: Union[str, int], binary: bool = False, - query_id: Optional[int] = None) -> 'APIResult': +def cache_clear(connection: 'Connection', cache: CacheInfo, query_id: Optional[int] = None) -> 'APIResult': """ Clears the cache without notifying listeners or cache writers. :param connection: connection to Ignite server, :param cache: name or ID of the cache, - :param binary: (optional) pass True to keep the value in binary form. - False by default, :param query_id: (optional) a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value is generated, :return: API result data object. Contains zero status on success, non-zero status and an error description otherwise. """ - return __cache_clear(connection, cache, binary, query_id) + return __cache_clear(connection, cache, query_id) -async def cache_clear_async(connection: 'AioConnection', cache: Union[str, int], binary: bool = False, - query_id: Optional[int] = None) -> 'APIResult': +async def cache_clear_async( + connection: 'AioConnection', cache: CacheInfo, query_id: Optional[int] = None) -> 'APIResult': """ Async version of cache_clear. 
""" - return await __cache_clear(connection, cache, binary, query_id) + return await __cache_clear(connection, cache, query_id) -def __cache_clear(connection, cache, binary, query_id): +def __cache_clear(connection, cache, query_id): query_struct = Query( OP_CACHE_CLEAR, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ], query_id=query_id, ) return query_perform( query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache, }, ) -def cache_clear_key(connection: 'Connection', cache: Union[str, int], key: Any, key_hint: 'IgniteDataType' = None, - binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': +def cache_clear_key(connection: 'Connection', cache: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None, + query_id: Optional[int] = None) -> 'APIResult': """ Clears the cache key without notifying listeners or cache writers. @@ -832,32 +771,28 @@ def cache_clear_key(connection: 'Connection', cache: Union[str, int], key: Any, :param key: key for the cache entry, :param key_hint: (optional) Ignite data type, for which the given key should be converted, - :param binary: (optional) pass True to keep the value in binary form. - False by default, :param query_id: (optional) a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value is generated, :return: API result data object. Contains zero status on success, non-zero status and an error description otherwise. 
""" - return __cache_clear_key(connection, cache, key, key_hint, binary, query_id) + return __cache_clear_key(connection, cache, key, key_hint, query_id) -async def cache_clear_key_async(connection: 'AioConnection', cache: Union[str, int], key: Any, - key_hint: 'IgniteDataType' = None, binary: bool = False, - query_id: Optional[int] = None) -> 'APIResult': +async def cache_clear_key_async(connection: 'AioConnection', cache: CacheInfo, key: Any, + key_hint: 'IgniteDataType' = None, query_id: Optional[int] = None) -> 'APIResult': """ Async version of cache_clear_key. """ - return await __cache_clear_key(connection, cache, key, key_hint, binary, query_id) + return await __cache_clear_key(connection, cache, key, key_hint, query_id) -def __cache_clear_key(connection, cache, key, key_hint, binary, query_id): +def __cache_clear_key(connection, cache, key, key_hint, query_id): query_struct = Query( OP_CACHE_CLEAR_KEY, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('key', key_hint or AnyDataObject), ], query_id=query_id, @@ -865,46 +800,42 @@ def __cache_clear_key(connection, cache, key, key_hint, binary, query_id): return query_perform( query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache, 'key': key, }, ) -def cache_clear_keys(connection: 'Connection', cache: Union[str, int], keys: Iterable, binary: bool = False, - query_id: Optional[int] = None) -> 'APIResult': +def cache_clear_keys( + connection: 'Connection', cache: CacheInfo, keys: Iterable, query_id: Optional[int] = None) -> 'APIResult': """ Clears the cache keys without notifying listeners or cache writers. :param connection: connection to Ignite server, :param cache: name or ID of the cache, :param keys: list of keys or tuples of (key, key_hint), - :param binary: (optional) pass True to keep the value in binary form. 
- False by default, :param query_id: (optional) a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value is generated, :return: API result data object. Contains zero status on success, non-zero status and an error description otherwise. """ - return __cache_clear_keys(connection, cache, keys, binary, query_id) + return __cache_clear_keys(connection, cache, keys, query_id) -async def cache_clear_keys_async(connection: 'AioConnection', cache: Union[str, int], keys: Iterable, - binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': +async def cache_clear_keys_async( + connection: 'AioConnection', cache: CacheInfo, keys: Iterable, query_id: Optional[int] = None) -> 'APIResult': """ Async version of cache_clear_keys. """ - return await __cache_clear_keys(connection, cache, keys, binary, query_id) + return await __cache_clear_keys(connection, cache, keys, query_id) -def __cache_clear_keys(connection, cache, keys, binary, query_id): +def __cache_clear_keys(connection, cache, keys, query_id): query_struct = Query( OP_CACHE_CLEAR_KEYS, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('keys', AnyDataArray()), ], query_id=query_id, @@ -912,15 +843,14 @@ def __cache_clear_keys(connection, cache, keys, binary, query_id): return query_perform( query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache, 'keys': keys, }, ) -def cache_remove_key(connection: 'Connection', cache: Union[str, int], key: Any, key_hint: 'IgniteDataType' = None, - binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': +def cache_remove_key(connection: 'Connection', cache: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None, + query_id: Optional[int] = None) -> 'APIResult': """ Clears the cache key without notifying listeners or cache writers. 
@@ -929,8 +859,6 @@ def cache_remove_key(connection: 'Connection', cache: Union[str, int], key: Any, :param key: key for the cache entry, :param key_hint: (optional) Ignite data type, for which the given key should be converted, - :param binary: (optional) pass True to keep the value in binary form. - False by default, :param query_id: (optional) a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value is generated, @@ -938,24 +866,22 @@ def cache_remove_key(connection: 'Connection', cache: Union[str, int], key: Any, success code, or non-zero status and an error description if something has gone wrong. """ - return __cache_remove_key(connection, cache, key, key_hint, binary, query_id) + return __cache_remove_key(connection, cache, key, key_hint, query_id) -async def cache_remove_key_async(connection: 'AioConnection', cache: Union[str, int], key: Any, - key_hint: 'IgniteDataType' = None, binary: bool = False, - query_id: Optional[int] = None) -> 'APIResult': +async def cache_remove_key_async(connection: 'AioConnection', cache: CacheInfo, key: Any, + key_hint: 'IgniteDataType' = None, query_id: Optional[int] = None) -> 'APIResult': """ Async version of cache_remove_key. 
""" - return await __cache_remove_key(connection, cache, key, key_hint, binary, query_id) + return await __cache_remove_key(connection, cache, key, key_hint, query_id) -def __cache_remove_key(connection, cache, key, key_hint, binary, query_id): +def __cache_remove_key(connection, cache, key, key_hint, query_id): query_struct = Query( OP_CACHE_REMOVE_KEY, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('key', key_hint or AnyDataObject), ], query_id=query_id, @@ -963,8 +889,7 @@ def __cache_remove_key(connection, cache, key, key_hint, binary, query_id): return query_perform( query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache, 'key': key, }, response_config=[ @@ -974,9 +899,9 @@ def __cache_remove_key(connection, cache, key, key_hint, binary, query_id): ) -def cache_remove_if_equals(connection: 'Connection', cache: Union[str, int], key: Any, sample: Any, +def cache_remove_if_equals(connection: 'Connection', cache: CacheInfo, key: Any, sample: Any, key_hint: 'IgniteDataType' = None, sample_hint: 'IgniteDataType' = None, - binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': + query_id: Optional[int] = None) -> 'APIResult': """ Removes an entry with a given key if provided value is equal to actual value, notifying listeners and cache writers. @@ -989,8 +914,6 @@ def cache_remove_if_equals(connection: 'Connection', cache: Union[str, int], key should be converted, :param sample_hint: (optional) Ignite data type, for whic the given sample should be converted - :param binary: (optional) pass True to keep the value in binary form. - False by default, :param query_id: (optional) a value generated by client and returned as-is in response.query_id. 
When the parameter is omitted, a random value is generated, @@ -998,24 +921,23 @@ def cache_remove_if_equals(connection: 'Connection', cache: Union[str, int], key success code, or non-zero status and an error description if something has gone wrong. """ - return __cache_remove_if_equals(connection, cache, key, sample, key_hint, sample_hint, binary, query_id) + return __cache_remove_if_equals(connection, cache, key, sample, key_hint, sample_hint, query_id) async def cache_remove_if_equals_async( - connection: 'AioConnection', cache: Union[str, int], key: Any, sample: Any, key_hint: 'IgniteDataType' = None, - sample_hint: 'IgniteDataType' = None, binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': + connection: 'AioConnection', cache: CacheInfo, key: Any, sample: Any, key_hint: 'IgniteDataType' = None, + sample_hint: 'IgniteDataType' = None, query_id: Optional[int] = None) -> 'APIResult': """ Async version of cache_remove_if_equals. """ - return await __cache_remove_if_equals(connection, cache, key, sample, key_hint, sample_hint, binary, query_id) + return await __cache_remove_if_equals(connection, cache, key, sample, key_hint, sample_hint, query_id) -def __cache_remove_if_equals(connection, cache, key, sample, key_hint, sample_hint, binary, query_id): +def __cache_remove_if_equals(connection, cache, key, sample, key_hint, sample_hint, query_id): query_struct = Query( OP_CACHE_REMOVE_IF_EQUALS, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('key', key_hint or AnyDataObject), ('sample', sample_hint or AnyDataObject), ], @@ -1024,8 +946,7 @@ def __cache_remove_if_equals(connection, cache, key, sample, key_hint, sample_hi return query_perform( query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache, 'key': key, 'sample': sample, }, @@ -1036,39 +957,36 @@ def __cache_remove_if_equals(connection, cache, key, sample, key_hint, sample_hi ) -def 
cache_remove_keys(connection: 'Connection', cache: Union[str, int], keys: Iterable, binary: bool = False, - query_id: Optional[int] = None) -> 'APIResult': +def cache_remove_keys( + connection: 'Connection', cache: CacheInfo, keys: Iterable, query_id: Optional[int] = None) -> 'APIResult': """ Removes entries with given keys, notifying listeners and cache writers. :param connection: connection to Ignite server, :param cache: name or ID of the cache, :param keys: list of keys or tuples of (key, key_hint), - :param binary: (optional) pass True to keep the value in binary form. - False by default, :param query_id: (optional) a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value is generated, :return: API result data object. Contains zero status on success, non-zero status and an error description otherwise. """ - return __cache_remove_keys(connection, cache, keys, binary, query_id) + return __cache_remove_keys(connection, cache, keys, query_id) -async def cache_remove_keys_async(connection: 'AioConnection', cache: Union[str, int], keys: Iterable, - binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': +async def cache_remove_keys_async( + connection: 'AioConnection', cache: CacheInfo, keys: Iterable, query_id: Optional[int] = None) -> 'APIResult': """ Async version of cache_remove_keys. 
""" - return await __cache_remove_keys(connection, cache, keys, binary, query_id) + return await __cache_remove_keys(connection, cache, keys, query_id) -def __cache_remove_keys(connection, cache, keys, binary, query_id): +def __cache_remove_keys(connection, cache, keys, query_id): query_struct = Query( OP_CACHE_REMOVE_KEYS, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('keys', AnyDataArray()), ], query_id=query_id, @@ -1076,59 +994,53 @@ def __cache_remove_keys(connection, cache, keys, binary, query_id): return query_perform( query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache, 'keys': keys, }, ) -def cache_remove_all(connection: 'Connection', cache: Union[str, int], binary: bool = False, - query_id: Optional[int] = None) -> 'APIResult': +def cache_remove_all(connection: 'Connection', cache: CacheInfo, query_id: Optional[int] = None) -> 'APIResult': """ Removes all entries from cache, notifying listeners and cache writers. :param connection: connection to Ignite server, :param cache: name or ID of the cache, - :param binary: (optional) pass True to keep the value in binary form. - False by default, :param query_id: (optional) a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value is generated, :return: API result data object. Contains zero status on success, non-zero status and an error description otherwise. """ - return __cache_remove_all(connection, cache, binary, query_id) + return __cache_remove_all(connection, cache, query_id) -async def cache_remove_all_async(connection: 'AioConnection', cache: Union[str, int], binary: bool = False, - query_id: Optional[int] = None) -> 'APIResult': +async def cache_remove_all_async( + connection: 'AioConnection', cache: CacheInfo, query_id: Optional[int] = None) -> 'APIResult': """ Async version of cache_remove_all. 
""" - return await __cache_remove_all(connection, cache, binary, query_id) + return await __cache_remove_all(connection, cache, query_id) -def __cache_remove_all(connection, cache, binary, query_id): +def __cache_remove_all(connection, cache, query_id): query_struct = Query( OP_CACHE_REMOVE_ALL, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ], query_id=query_id, ) return query_perform( query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache, }, ) -def cache_get_size(connection: 'Connection', cache: Union[str, int], peek_modes: Union[int, list, tuple] = None, - binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': +def cache_get_size(connection: 'Connection', cache: CacheInfo, peek_modes: Union[int, list, tuple] = None, + query_id: Optional[int] = None) -> 'APIResult': """ Gets the number of entries in cache. @@ -1137,8 +1049,6 @@ def cache_get_size(connection: 'Connection', cache: Union[str, int], peek_modes: :param peek_modes: (optional) limit count to near cache partition (PeekModes.NEAR), primary cache (PeekModes.PRIMARY), or backup cache (PeekModes.BACKUP). Defaults to pimary cache partitions (PeekModes.PRIMARY), - :param binary: (optional) pass True to keep the value in binary form. - False by default, :param query_id: (optional) a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value is generated, @@ -1146,16 +1056,17 @@ def cache_get_size(connection: 'Connection', cache: Union[str, int], peek_modes: cache entries on success, non-zero status and an error description otherwise. 
""" - return __cache_get_size(connection, cache, peek_modes, binary, query_id) + return __cache_get_size(connection, cache, peek_modes, query_id) -async def cache_get_size_async(connection: 'AioConnection', cache: Union[str, int], - peek_modes: Union[int, list, tuple] = None, binary: bool = False, - query_id: Optional[int] = None) -> 'APIResult': - return await __cache_get_size(connection, cache, peek_modes, binary, query_id) +async def cache_get_size_async( + connection: 'AioConnection', cache: CacheInfo, peek_modes: Union[int, list, tuple] = None, + query_id: Optional[int] = None +) -> 'APIResult': + return await __cache_get_size(connection, cache, peek_modes, query_id) -def __cache_get_size(connection, cache, peek_modes, binary, query_id): +def __cache_get_size(connection, cache, peek_modes, query_id): if peek_modes is None: peek_modes = [] elif not isinstance(peek_modes, (list, tuple)): @@ -1164,8 +1075,7 @@ def __cache_get_size(connection, cache, peek_modes, binary, query_id): query_struct = Query( OP_CACHE_GET_SIZE, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('peek_modes', ByteArray), ], query_id=query_id, @@ -1173,8 +1083,7 @@ def __cache_get_size(connection, cache, peek_modes, binary, query_id): return query_perform( query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache, 'peek_modes': peek_modes, }, response_config=[ @@ -1184,9 +1093,8 @@ def __cache_get_size(connection, cache, peek_modes, binary, query_id): ) -def cache_local_peek(conn: 'Connection', cache: Union[str, int], key: Any, key_hint: 'IgniteDataType' = None, - peek_modes: Union[int, list, tuple] = None, binary: bool = False, - query_id: Optional[int] = None) -> 'APIResult': +def cache_local_peek(conn: 'Connection', cache: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None, + peek_modes: Union[int, list, tuple] = None, query_id: Optional[int] = None) -> 'APIResult': """ Peeks at in-memory cached 
value using default optional peek mode. @@ -1201,28 +1109,26 @@ def cache_local_peek(conn: 'Connection', cache: Union[str, int], key: Any, key_h :param peek_modes: (optional) limit count to near cache partition (PeekModes.NEAR), primary cache (PeekModes.PRIMARY), or backup cache (PeekModes.BACKUP). Defaults to primary cache partitions (PeekModes.PRIMARY), - :param binary: (optional) pass True to keep the value in binary form. - False by default, :param query_id: (optional) a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value is generated, :return: API result data object. Contains zero status and a peeked value (null if not found). """ - return __cache_local_peek(conn, cache, key, key_hint, peek_modes, binary, query_id) + return __cache_local_peek(conn, cache, key, key_hint, peek_modes, query_id) async def cache_local_peek_async( - conn: 'AioConnection', cache: Union[str, int], key: Any, key_hint: 'IgniteDataType' = None, - peek_modes: Union[int, list, tuple] = None, binary: bool = False, - query_id: Optional[int] = None) -> 'APIResult': + conn: 'AioConnection', cache: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None, + peek_modes: Union[int, list, tuple] = None, query_id: Optional[int] = None +) -> 'APIResult': """ Async version of cache_local_peek. 
""" - return await __cache_local_peek(conn, cache, key, key_hint, peek_modes, binary, query_id) + return await __cache_local_peek(conn, cache, key, key_hint, peek_modes, query_id) -def __cache_local_peek(conn, cache, key, key_hint, peek_modes, binary, query_id): +def __cache_local_peek(conn, cache, key, key_hint, peek_modes, query_id): if peek_modes is None: peek_modes = [] elif not isinstance(peek_modes, (list, tuple)): @@ -1231,8 +1137,7 @@ def __cache_local_peek(conn, cache, key, key_hint, peek_modes, binary, query_id) query_struct = Query( OP_CACHE_LOCAL_PEEK, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('key', key_hint or AnyDataObject), ('peek_modes', ByteArray), ], @@ -1241,8 +1146,7 @@ def __cache_local_peek(conn, cache, key, key_hint, peek_modes, binary, query_id) return query_perform( query_struct, conn, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache, 'key': key, 'peek_modes': peek_modes, }, diff --git a/pyignite/api/sql.py b/pyignite/api/sql.py index b10cc7d..6f66347 100644 --- a/pyignite/api/sql.py +++ b/pyignite/api/sql.py @@ -13,35 +13,32 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Union - from pyignite.connection import AioConnection, Connection -from pyignite.datatypes import AnyDataArray, AnyDataObject, Bool, Byte, Int, Long, Map, Null, String, StructArray +from pyignite.datatypes import AnyDataArray, AnyDataObject, Bool, Int, Long, Map, Null, String, StructArray from pyignite.datatypes.sql import StatementType from pyignite.queries import Query, query_perform from pyignite.queries.op_codes import ( OP_QUERY_SCAN, OP_QUERY_SCAN_CURSOR_GET_PAGE, OP_QUERY_SQL, OP_QUERY_SQL_CURSOR_GET_PAGE, OP_QUERY_SQL_FIELDS, OP_QUERY_SQL_FIELDS_CURSOR_GET_PAGE, OP_RESOURCE_CLOSE ) -from pyignite.utils import cache_id, deprecated +from pyignite.utils import deprecated from .result import APIResult +from ..queries.query import CacheInfo from ..queries.response import SQLResponse -def scan(conn: 'Connection', cache: Union[str, int], page_size: int, partitions: int = -1, local: bool = False, - binary: bool = False, query_id: int = None) -> APIResult: +def scan(conn: 'Connection', cache: CacheInfo, page_size: int, partitions: int = -1, local: bool = False, + query_id: int = None) -> APIResult: """ Performs scan query. :param conn: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache: Cache meta info. :param page_size: cursor page size, :param partitions: (optional) number of partitions to query (negative to query entire cache), :param local: (optional) pass True if this query should be executed on local node only. Defaults to False, - :param binary: (optional) pass True to keep the value in binary form. - False by default, :param query_id: (optional) a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value is generated, @@ -56,15 +53,15 @@ def scan(conn: 'Connection', cache: Union[str, int], page_size: int, partitions: * `more`: bool, True if more data is available for subsequent ‘scan_cursor_get_page’ calls. 
""" - return __scan(conn, cache, page_size, partitions, local, binary, query_id) + return __scan(conn, cache, page_size, partitions, local, query_id) -async def scan_async(conn: 'AioConnection', cache: Union[str, int], page_size: int, partitions: int = -1, - local: bool = False, binary: bool = False, query_id: int = None) -> APIResult: +async def scan_async(conn: 'AioConnection', cache: CacheInfo, page_size: int, partitions: int = -1, + local: bool = False, query_id: int = None) -> APIResult: """ Async version of scan. """ - return await __scan(conn, cache, page_size, partitions, local, binary, query_id) + return await __scan(conn, cache, page_size, partitions, local, query_id) def __query_result_post_process(result): @@ -73,12 +70,11 @@ def __query_result_post_process(result): return result -def __scan(conn, cache, page_size, partitions, local, binary, query_id): +def __scan(conn, cache, page_size, partitions, local, query_id): query_struct = Query( OP_QUERY_SCAN, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('filter', Null), ('page_size', Int), ('partitions', Int), @@ -89,8 +85,7 @@ def __scan(conn, cache, page_size, partitions, local, binary, query_id): return query_perform( query_struct, conn, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache, 'filter': None, 'page_size': page_size, 'partitions': partitions, @@ -156,18 +151,17 @@ def __scan_cursor_get_page(conn, cursor, query_id): @deprecated(version='1.2.0', reason="This API is deprecated and will be removed in the following major release. 
" "Use sql_fields instead") def sql( - conn: 'Connection', cache: Union[str, int], + conn: 'Connection', cache: CacheInfo, table_name: str, query_str: str, page_size: int, query_args=None, distributed_joins: bool = False, replicated_only: bool = False, - local: bool = False, timeout: int = 0, binary: bool = False, - query_id: int = None + local: bool = False, timeout: int = 0, query_id: int = None ) -> APIResult: """ Executes an SQL query over data stored in the cluster. The query returns the whole record (key and value). :param conn: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache: Cache meta info, :param table_name: name of a type or SQL table, :param query_str: SQL query string, :param page_size: cursor page size, @@ -179,8 +173,6 @@ def sql( on local node only. Defaults to False, :param timeout: (optional) non-negative timeout value in ms. Zero disables timeout (default), - :param binary: (optional) pass True to keep the value in binary form. - False by default, :param query_id: (optional) a value generated by client and returned as-is in response.query_id. 
When the parameter is omitted, a random value is generated, @@ -202,8 +194,7 @@ def sql( query_struct = Query( OP_QUERY_SQL, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('table_name', String), ('query_str', String), ('query_args', AnyDataArray()), @@ -218,8 +209,7 @@ def sql( result = query_struct.perform( conn, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache, 'table_name': table_name, 'query_str': query_str, 'query_args': query_args, @@ -287,19 +277,19 @@ def sql_cursor_get_page( def sql_fields( - conn: 'Connection', cache: Union[str, int], + conn: 'Connection', cache: CacheInfo, query_str: str, page_size: int, query_args=None, schema: str = None, statement_type: int = StatementType.ANY, distributed_joins: bool = False, local: bool = False, replicated_only: bool = False, enforce_join_order: bool = False, collocated: bool = False, lazy: bool = False, include_field_names: bool = False, max_rows: int = -1, - timeout: int = 0, binary: bool = False, query_id: int = None + timeout: int = 0, query_id: int = None ) -> APIResult: """ Performs SQL fields query. :param conn: connection to Ignite server, - :param cache: name or ID of the cache. If zero, then schema is used. + :param cache: Cache meta info. :param query_str: SQL query string, :param page_size: cursor page size, :param query_args: (optional) query arguments. List of values or @@ -307,9 +297,9 @@ def sql_fields( :param schema: schema for the query. :param statement_type: (optional) statement type. Can be: - * StatementType.ALL − any type (default), + * StatementType.ALL − any type (default), * StatementType.SELECT − select, - * StatementType.UPDATE − update. + * StatementType.UPDATE − update. :param distributed_joins: (optional) distributed joins. :param local: (optional) pass True if this query should be executed @@ -323,7 +313,6 @@ def sql_fields( :param max_rows: (optional) query-wide maximum of rows. 
:param timeout: (optional) non-negative timeout value in ms. Zero disables timeout. - :param binary: (optional) pass True to keep the value in binary form. :param query_id: (optional) a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value is generated, @@ -340,37 +329,37 @@ def sql_fields( """ return __sql_fields(conn, cache, query_str, page_size, query_args, schema, statement_type, distributed_joins, local, replicated_only, enforce_join_order, collocated, lazy, include_field_names, max_rows, - timeout, binary, query_id) + timeout, query_id) async def sql_fields_async( - conn: 'AioConnection', cache: Union[str, int], + conn: 'AioConnection', cache: CacheInfo, query_str: str, page_size: int, query_args=None, schema: str = None, statement_type: int = StatementType.ANY, distributed_joins: bool = False, local: bool = False, replicated_only: bool = False, enforce_join_order: bool = False, collocated: bool = False, lazy: bool = False, include_field_names: bool = False, max_rows: int = -1, - timeout: int = 0, binary: bool = False, query_id: int = None + timeout: int = 0, query_id: int = None ) -> APIResult: """ Async version of sql_fields. 
""" return await __sql_fields(conn, cache, query_str, page_size, query_args, schema, statement_type, distributed_joins, local, replicated_only, enforce_join_order, collocated, lazy, include_field_names, - max_rows, timeout, binary, query_id) + max_rows, timeout, query_id) -def __sql_fields(conn, cache, query_str, page_size, query_args, schema, statement_type, distributed_joins, local, - replicated_only, enforce_join_order, collocated, lazy, include_field_names, max_rows, timeout, - binary, query_id): +def __sql_fields( + conn, cache, query_str, page_size, query_args, schema, statement_type, distributed_joins, local, + replicated_only, enforce_join_order, collocated, lazy, include_field_names, max_rows, timeout, query_id +): if query_args is None: query_args = [] query_struct = Query( OP_QUERY_SQL_FIELDS, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('schema', String), ('page_size', Int), ('max_rows', Int), @@ -393,8 +382,7 @@ def __sql_fields(conn, cache, query_str, page_size, query_args, schema, statemen return query_perform( query_struct, conn, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache, 'schema': schema, 'page_size': page_size, 'max_rows': max_rows, diff --git a/pyignite/cache.py b/pyignite/cache.py index 3c93637..0ca76cf 100644 --- a/pyignite/cache.py +++ b/pyignite/cache.py @@ -15,9 +15,10 @@ from typing import Any, Iterable, Optional, Tuple, Union -from .datatypes import prop_codes +from .datatypes import prop_codes, ExpiryPolicy from .datatypes.internal import AnyDataObject from .exceptions import CacheCreationError, CacheError, ParameterError, SQLError +from .queries.query import CacheInfo from .utils import cache_id, status_to_exception from .api.cache_config import ( cache_create, cache_create_with_config, cache_get_or_create, cache_get_or_create_with_config, cache_destroy, @@ -93,12 +94,14 @@ def __parse_settings(settings: Union[str, dict]) -> Tuple[Optional[str], Optiona 
class BaseCache: - def __init__(self, client: 'BaseClient', name: str): + def __init__(self, client: 'BaseClient', name: str, expiry_policy: ExpiryPolicy = None): self._client = client self._name = name self._settings = None - self._cache_id = cache_id(self._name) - self._client.register_cache(self._cache_id) + self._cache_info = CacheInfo(cache_id=cache_id(self._name), + protocol_context=client.protocol_context, + expiry_policy=expiry_policy) + self._client.register_cache(self.cache_info.cache_id) @property def name(self) -> str: @@ -114,6 +117,13 @@ def client(self) -> 'BaseClient': """ return self._client + @property + def cache_info(self) -> CacheInfo: + """ + Cache meta info. + """ + return self._cache_info + @property def cache_id(self) -> int: """ @@ -121,7 +131,17 @@ def cache_id(self) -> int: :return: integer value of the cache ID. """ - return self._cache_id + return self._cache_info.cache_id + + def with_expire_policy( + self, expiry_policy: ExpiryPolicy = None, create: Union[int, float] = ExpiryPolicy.UNCHANGED, + update: Union[int, float] = ExpiryPolicy.UNCHANGED, access: Union[int, float] = ExpiryPolicy.UNCHANGED + ): + cache_cls = type(self) + if not expiry_policy: + expiry_policy = ExpiryPolicy(create=create, update=update, access=access) + + return cache_cls(self.client, self.name, expiry_policy) class Cache(BaseCache): @@ -134,17 +154,17 @@ class Cache(BaseCache): :ref:`this example ` on how to do it. """ - def __init__(self, client: 'Client', name: str): + def __init__(self, client: 'Client', name: str, expiry_policy: ExpiryPolicy = None): """ Initialize cache object. For internal use. :param client: Ignite client, :param name: Cache name. 
""" - super().__init__(client, name) + super().__init__(client, name, expiry_policy) def _get_best_node(self, key=None, key_hint=None): - return self.client.get_best_node(self._cache_id, key, key_hint) + return self.client.get_best_node(self, key, key_hint) @property def settings(self) -> Optional[dict]: @@ -159,7 +179,7 @@ def settings(self) -> Optional[dict]: if self._settings is None: config_result = cache_get_configuration( self._get_best_node(), - self._cache_id + self.cache_info ) if config_result.status == 0: self._settings = config_result.value @@ -173,7 +193,7 @@ def destroy(self): """ Destroys cache with a given name. """ - return cache_destroy(self._get_best_node(), self._cache_id) + return cache_destroy(self._get_best_node(), self.cache_id) @status_to_exception(CacheError) def get(self, key, key_hint: object = None) -> Any: @@ -190,7 +210,7 @@ def get(self, key, key_hint: object = None) -> Any: result = cache_get( self._get_best_node(key, key_hint), - self._cache_id, + self.cache_info, key, key_hint=key_hint ) @@ -198,9 +218,7 @@ def get(self, key, key_hint: object = None) -> Any: return result @status_to_exception(CacheError) - def put( - self, key, value, key_hint: object = None, value_hint: object = None - ): + def put(self, key, value, key_hint: object = None, value_hint: object = None): """ Puts a value with a given key to cache (overwriting existing value if any). @@ -217,7 +235,7 @@ def put( return cache_put( self._get_best_node(key, key_hint), - self._cache_id, key, value, + self.cache_info, key, value, key_hint=key_hint, value_hint=value_hint ) @@ -229,7 +247,7 @@ def get_all(self, keys: list) -> list: :param keys: list of keys or tuples of (key, key_hint), :return: a dict of key-value pairs. 
""" - result = cache_get_all(self._get_best_node(), self._cache_id, keys) + result = cache_get_all(self._get_best_node(), self.cache_info, keys) if result.value: for key, value in result.value.items(): result.value[key] = self.client.unwrap_binary(value) @@ -245,7 +263,7 @@ def put_all(self, pairs: dict): to save. Each key or value can be an item of representable Python type or a tuple of (item, hint), """ - return cache_put_all(self._get_best_node(), self._cache_id, pairs) + return cache_put_all(self._get_best_node(), self.cache_info, pairs) @status_to_exception(CacheError) def replace( @@ -266,7 +284,7 @@ def replace( result = cache_replace( self._get_best_node(key, key_hint), - self._cache_id, key, value, + self.cache_info, key, value, key_hint=key_hint, value_hint=value_hint ) result.value = self.client.unwrap_binary(result.value) @@ -282,9 +300,9 @@ def clear(self, keys: Optional[list] = None): """ conn = self._get_best_node() if keys: - return cache_clear_keys(conn, self._cache_id, keys) + return cache_clear_keys(conn, self.cache_info, keys) else: - return cache_clear(conn, self._cache_id) + return cache_clear(conn, self.cache_info) @status_to_exception(CacheError) def clear_key(self, key, key_hint: object = None): @@ -300,7 +318,7 @@ def clear_key(self, key, key_hint: object = None): return cache_clear_key( self._get_best_node(key, key_hint), - self._cache_id, + self.cache_info, key, key_hint=key_hint ) @@ -313,7 +331,7 @@ def clear_keys(self, keys: Iterable): :param keys: a list of keys or (key, type hint) tuples """ - return cache_clear_keys(self._get_best_node(), self._cache_id, keys) + return cache_clear_keys(self._get_best_node(), self.cache_info, keys) @status_to_exception(CacheError) def contains_key(self, key, key_hint=None) -> bool: @@ -330,7 +348,7 @@ def contains_key(self, key, key_hint=None) -> bool: return cache_contains_key( self._get_best_node(key, key_hint), - self._cache_id, + self.cache_info, key, key_hint=key_hint ) @@ -343,7 +361,7 @@ def 
contains_keys(self, keys: Iterable) -> bool: :param keys: a list of keys or (key, type hint) tuples, :return: boolean `True` when all keys are present, `False` otherwise. """ - return cache_contains_keys(self._get_best_node(), self._cache_id, keys) + return cache_contains_keys(self._get_best_node(), self.cache_info, keys) @status_to_exception(CacheError) def get_and_put(self, key, value, key_hint=None, value_hint=None) -> Any: @@ -364,7 +382,7 @@ def get_and_put(self, key, value, key_hint=None, value_hint=None) -> Any: result = cache_get_and_put( self._get_best_node(key, key_hint), - self._cache_id, + self.cache_info, key, value, key_hint, value_hint ) @@ -392,7 +410,7 @@ def get_and_put_if_absent( result = cache_get_and_put_if_absent( self._get_best_node(key, key_hint), - self._cache_id, + self.cache_info, key, value, key_hint, value_hint ) @@ -417,7 +435,7 @@ def put_if_absent(self, key, value, key_hint=None, value_hint=None): return cache_put_if_absent( self._get_best_node(key, key_hint), - self._cache_id, + self.cache_info, key, value, key_hint, value_hint ) @@ -437,7 +455,7 @@ def get_and_remove(self, key, key_hint=None) -> Any: result = cache_get_and_remove( self._get_best_node(key, key_hint), - self._cache_id, + self.cache_info, key, key_hint ) @@ -466,7 +484,7 @@ def get_and_replace( result = cache_get_and_replace( self._get_best_node(key, key_hint), - self._cache_id, + self.cache_info, key, value, key_hint, value_hint ) @@ -486,7 +504,7 @@ def remove_key(self, key, key_hint=None): key_hint = AnyDataObject.map_python_type(key) return cache_remove_key( - self._get_best_node(key, key_hint), self._cache_id, key, key_hint + self._get_best_node(key, key_hint), self.cache_info, key, key_hint ) @status_to_exception(CacheError) @@ -498,7 +516,7 @@ def remove_keys(self, keys: list): :param keys: list of keys or tuples of (key, key_hint) to remove. 
""" return cache_remove_keys( - self._get_best_node(), self._cache_id, keys + self._get_best_node(), self.cache_info, keys ) @status_to_exception(CacheError) @@ -506,7 +524,7 @@ def remove_all(self): """ Removes all cache entries, notifying listeners and cache writers. """ - return cache_remove_all(self._get_best_node(), self._cache_id) + return cache_remove_all(self._get_best_node(), self.cache_info) @status_to_exception(CacheError) def remove_if_equals(self, key, sample, key_hint=None, sample_hint=None): @@ -526,7 +544,7 @@ def remove_if_equals(self, key, sample, key_hint=None, sample_hint=None): return cache_remove_if_equals( self._get_best_node(key, key_hint), - self._cache_id, + self.cache_info, key, sample, key_hint, sample_hint ) @@ -556,7 +574,7 @@ def replace_if_equals( result = cache_replace_if_equals( self._get_best_node(key, key_hint), - self._cache_id, + self.cache_info, key, sample, value, key_hint, sample_hint, value_hint ) @@ -574,7 +592,7 @@ def get_size(self, peek_modes=None): :return: integer number of cache entries. """ return cache_get_size( - self._get_best_node(), self._cache_id, peek_modes + self._get_best_node(), self.cache_info, peek_modes ) def scan(self, page_size: int = 1, partitions: int = -1, local: bool = False) -> ScanCursor: @@ -590,7 +608,7 @@ def scan(self, page_size: int = 1, partitions: int = -1, local: bool = False) -> on local node only. Defaults to False, :return: Scan query cursor. 
""" - return ScanCursor(self.client, self._cache_id, page_size, partitions, local) + return ScanCursor(self.client, self.cache_info, page_size, partitions, local) def select_row( self, query_str: str, page_size: int = 1, @@ -621,5 +639,5 @@ def select_row( if not type_name: raise SQLError('Value type is unknown') - return SqlCursor(self.client, self._cache_id, type_name, query_str, page_size, query_args, + return SqlCursor(self.client, self.cache_info, type_name, query_str, page_size, query_args, distributed_joins, replicated_only, local, timeout) diff --git a/pyignite/client.py b/pyignite/client.py index 17e9d80..44a69da 100644 --- a/pyignite/client.py +++ b/pyignite/client.py @@ -58,6 +58,7 @@ from .datatypes.base import IgniteDataType from .datatypes.internal import tc_map from .exceptions import BinaryTypeError, CacheError, ReconnectError, connection_errors +from .queries.query import CacheInfo from .stream import BinaryStream, READ_BACKWARD from .utils import ( cache_id, capitalize, entity_id, schema_id, process_delimiter, status_to_exception, is_iterable, is_wrapped, @@ -719,15 +720,19 @@ def sql( :return: sql fields cursor with result rows as a lists. If `include_field_names` was set, the first row will hold field names. 
""" + if isinstance(cache, (int, str)): + c_info = CacheInfo(cache_id=cache_id(cache), protocol_context=self.protocol_context) + elif isinstance(cache, Cache): + c_info = cache.cache_info + else: + c_info = None - c_id = cache.cache_id if isinstance(cache, Cache) else cache_id(cache) - - if c_id != 0: + if c_info: schema = None - return SqlFieldsCursor(self, c_id, query_str, page_size, query_args, schema, statement_type, distributed_joins, - local, replicated_only, enforce_join_order, collocated, lazy, include_field_names, - max_rows, timeout) + return SqlFieldsCursor(self, c_info, query_str, page_size, query_args, schema, statement_type, + distributed_joins, local, replicated_only, enforce_join_order, collocated, lazy, + include_field_names, max_rows, timeout) def get_cluster(self) -> 'Cluster': """ diff --git a/pyignite/cluster.py b/pyignite/cluster.py index f10afe4..5aefcc4 100644 --- a/pyignite/cluster.py +++ b/pyignite/cluster.py @@ -20,6 +20,7 @@ from pyignite.api.cluster import cluster_get_state, cluster_set_state from pyignite.exceptions import ClusterError from pyignite.utils import status_to_exception +from pyignite.datatypes import ClusterState class Cluster: @@ -33,7 +34,7 @@ def __init__(self, client: 'Client'): self._client = client @status_to_exception(ClusterError) - def get_state(self): + def get_state(self) -> 'ClusterState': """ Gets current cluster state. @@ -43,7 +44,7 @@ def get_state(self): return cluster_get_state(self._client.random_node) @status_to_exception(ClusterError) - def set_state(self, state): + def set_state(self, state: 'ClusterState'): """ Changes current cluster state to the given. diff --git a/pyignite/connection/protocol_context.py b/pyignite/connection/protocol_context.py index 54f5240..be23e56 100644 --- a/pyignite/connection/protocol_context.py +++ b/pyignite/connection/protocol_context.py @@ -98,3 +98,6 @@ def is_cluster_api_supported(self) -> bool: Check whether cluster API supported by the current protocol. 
""" return self.features and BitmaskFeature.CLUSTER_API in self.features + + def is_expiry_policy_supported(self) -> bool: + return self.version >= (1, 6, 0) diff --git a/pyignite/cursors.py b/pyignite/cursors.py index 0a8f0b0..a690d94 100644 --- a/pyignite/cursors.py +++ b/pyignite/cursors.py @@ -64,15 +64,15 @@ def more(self, value): setattr(self, '_more', value) @property - def cache_id(self): + def cache_info(self): """ Cache id. """ - return getattr(self, '_cache_id', None) + return getattr(self, '_cache_info', None) - @cache_id.setter - def cache_id(self, value): - setattr(self, '_cache_id', value) + @cache_info.setter + def cache_info(self, value): + setattr(self, '_cache_info', value) @property def client(self): @@ -134,9 +134,9 @@ async def close(self): class AbstractScanCursor: - def __init__(self, client, cache_id, page_size, partitions, local): + def __init__(self, client, cache_info, page_size, partitions, local): self.client = client - self.cache_id = cache_id + self.cache_info = cache_info self._page_size = page_size self._partitions = partitions self._local = local @@ -159,18 +159,18 @@ class ScanCursor(AbstractScanCursor, CursorMixin): """ Synchronous scan cursor. """ - def __init__(self, client, cache_id, page_size, partitions, local): + def __init__(self, client, cache_info, page_size, partitions, local): """ :param client: Synchronous Apache Ignite client. - :param cache_id: Cache id. + :param cache_info: Cache meta info. :param page_size: page size. :param partitions: number of partitions to query (negative to query entire cache). :param local: pass True if this query should be executed on local node only. 
""" - super().__init__(client, cache_id, page_size, partitions, local) + super().__init__(client, cache_info, page_size, partitions, local) self.connection = self.client.random_node - result = scan(self.connection, self.cache_id, self._page_size, self._partitions, self._local) + result = scan(self.connection, self.cache_info, self._page_size, self._partitions, self._local) self._finalize_init(result) def __next__(self): @@ -193,20 +193,20 @@ class AioScanCursor(AbstractScanCursor, AioCursorMixin): """ Asynchronous scan query cursor. """ - def __init__(self, client, cache_id, page_size, partitions, local): + def __init__(self, client, cache_info, page_size, partitions, local): """ :param client: Asynchronous Apache Ignite client. - :param cache_id: Cache id. + :param cache_info: Cache meta info. :param page_size: page size. :param partitions: number of partitions to query (negative to query entire cache). :param local: pass True if this query should be executed on local node only. """ - super().__init__(client, cache_id, page_size, partitions, local) + super().__init__(client, cache_info, page_size, partitions, local) async def __aenter__(self): if not self.connection: self.connection = await self.client.random_node() - result = await scan_async(self.connection, self.cache_id, self._page_size, self._partitions, self._local) + result = await scan_async(self.connection, self.cache_info, self._page_size, self._partitions, self._local) self._finalize_init(result) return self @@ -238,15 +238,15 @@ class SqlCursor(CursorMixin): """ Synchronous SQL query cursor. """ - def __init__(self, client, cache_id, *args, **kwargs): + def __init__(self, client, cache_info, *args, **kwargs): """ :param client: Synchronous Apache Ignite client. - :param cache_id: Cache id. + :param cache_info: Cache meta info. 
""" self.client = client - self.cache_id = cache_id + self.cache_info = cache_info self.connection = self.client.random_node - result = sql(self.connection, self.cache_id, *args, **kwargs) + result = sql(self.connection, self.cache_info, *args, **kwargs) if result.status != 0: raise SQLError(result.message) @@ -274,9 +274,9 @@ def __next__(self): class AbstractSqlFieldsCursor: - def __init__(self, client, cache_id): + def __init__(self, client, cache_info): self.client = client - self.cache_id = cache_id + self.cache_info = cache_info def _finalize_init(self, result): if result.status != 0: @@ -295,14 +295,14 @@ class SqlFieldsCursor(AbstractSqlFieldsCursor, CursorMixin): """ Synchronous SQL fields query cursor. """ - def __init__(self, client, cache_id, *args, **kwargs): + def __init__(self, client, cache_info, *args, **kwargs): """ :param client: Synchronous Apache Ignite client. - :param cache_id: Cache id. + :param cache_info: Cache meta info. """ - super().__init__(client, cache_id) + super().__init__(client, cache_info) self.connection = self.client.random_node - self._finalize_init(sql_fields(self.connection, self.cache_id, *args, **kwargs)) + self._finalize_init(sql_fields(self.connection, self.cache_info, *args, **kwargs)) def __next__(self): if not self.data: @@ -334,12 +334,12 @@ class AioSqlFieldsCursor(AbstractSqlFieldsCursor, AioCursorMixin): """ Asynchronous SQL fields query cursor. """ - def __init__(self, client, cache_id, *args, **kwargs): + def __init__(self, client, cache_info, *args, **kwargs): """ :param client: Synchronous Apache Ignite client. - :param cache_id: Cache id. + :param cache_info: Cache meta info. 
""" - super().__init__(client, cache_id) + super().__init__(client, cache_info) self._params = (args, kwargs) async def __aenter__(self): @@ -381,4 +381,4 @@ async def _initialize(self, *args, **kwargs): return self.connection = await self.client.random_node() - self._finalize_init(await sql_fields_async(self.connection, self.cache_id, *args, **kwargs)) + self._finalize_init(await sql_fields_async(self.connection, self.cache_info, *args, **kwargs)) diff --git a/pyignite/datatypes/__init__.py b/pyignite/datatypes/__init__.py index 5024f79..4f78dce 100644 --- a/pyignite/datatypes/__init__.py +++ b/pyignite/datatypes/__init__.py @@ -25,3 +25,5 @@ from .primitive_arrays import * from .primitive_objects import * from .standard import * +from .cluster_state import ClusterState +from .expiry_policy import ExpiryPolicy diff --git a/pyignite/datatypes/cache_config.py b/pyignite/datatypes/cache_config.py index 04ff607..a2b4322 100644 --- a/pyignite/datatypes/cache_config.py +++ b/pyignite/datatypes/cache_config.py @@ -12,14 +12,14 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - +from . 
import ExpiryPolicy from .standard import String from .internal import AnyDataObject, Struct, StructArray from .primitive import * __all__ = [ - 'cache_config_struct', 'CacheMode', 'PartitionLossPolicy', + 'get_cache_config_struct', 'CacheMode', 'PartitionLossPolicy', 'RebalanceMode', 'WriteSynchronizationMode', 'IndexType', ] @@ -118,36 +118,40 @@ class CacheAtomicityMode(Int): ]) -cache_config_struct = Struct([ - ('length', Int), - ('cache_atomicity_mode', CacheAtomicityMode), - ('backups_number', Int), - ('cache_mode', CacheMode), - ('copy_on_read', Bool), - ('data_region_name', String), - ('eager_ttl', Bool), - ('statistics_enabled', Bool), - ('group_name', String), - ('default_lock_timeout', Long), - ('max_concurrent_async_operations', Int), - ('max_query_iterators', Int), - ('name', String), - ('is_onheap_cache_enabled', Bool), - ('partition_loss_policy', PartitionLossPolicy), - ('query_detail_metric_size', Int), - ('query_parallelism', Int), - ('read_from_backup', Bool), - ('rebalance_batch_size', Int), - ('rebalance_batches_prefetch_count', Long), - ('rebalance_delay', Long), - ('rebalance_mode', RebalanceMode), - ('rebalance_order', Int), - ('rebalance_throttle', Long), - ('rebalance_timeout', Long), - ('sql_escape_all', Bool), - ('sql_index_inline_max_size', Int), - ('sql_schema', String), - ('write_synchronization_mode', WriteSynchronizationMode), - ('cache_key_configuration', CacheKeyConfiguration), - ('query_entities', QueryEntities), -]) +def get_cache_config_struct(protocol_context): + fields = [ + ('length', Int), + ('cache_atomicity_mode', CacheAtomicityMode), + ('backups_number', Int), + ('cache_mode', CacheMode), + ('copy_on_read', Bool), + ('data_region_name', String), + ('eager_ttl', Bool), + ('statistics_enabled', Bool), + ('group_name', String), + ('default_lock_timeout', Long), + ('max_concurrent_async_operations', Int), + ('max_query_iterators', Int), + ('name', String), + ('is_onheap_cache_enabled', Bool), + ('partition_loss_policy', 
PartitionLossPolicy), + ('query_detail_metric_size', Int), + ('query_parallelism', Int), + ('read_from_backup', Bool), + ('rebalance_batch_size', Int), + ('rebalance_batches_prefetch_count', Long), + ('rebalance_delay', Long), + ('rebalance_mode', RebalanceMode), + ('rebalance_order', Int), + ('rebalance_throttle', Long), + ('rebalance_timeout', Long), + ('sql_escape_all', Bool), + ('sql_index_inline_max_size', Int), + ('sql_schema', String), + ('write_synchronization_mode', WriteSynchronizationMode), + ('cache_key_configuration', CacheKeyConfiguration), + ('query_entities', QueryEntities), + ] + if protocol_context.is_expiry_policy_supported(): + fields.append(('expiry_policy', ExpiryPolicy)) + return Struct(fields=fields) diff --git a/pyignite/datatypes/cache_properties.py b/pyignite/datatypes/cache_properties.py index 9bf34de..a1766f3 100644 --- a/pyignite/datatypes/cache_properties.py +++ b/pyignite/datatypes/cache_properties.py @@ -15,6 +15,7 @@ import ctypes +from . import ExpiryPolicy from .prop_codes import * from .cache_config import ( CacheMode, CacheAtomicityMode, PartitionLossPolicy, RebalanceMode, @@ -34,7 +35,7 @@ 'PropRebalanceOrder', 'PropRebalanceThrottle', 'PropGroupName', 'PropCacheKeyConfiguration', 'PropDefaultLockTimeout', 'PropMaxConcurrentAsyncOperation', 'PropPartitionLossPolicy', - 'PropEagerTTL', 'PropStatisticsEnabled', 'prop_map', 'AnyProperty', + 'PropEagerTTL', 'PropStatisticsEnabled', 'PropExpiryPolicy', 'prop_map', 'AnyProperty', ] @@ -70,6 +71,7 @@ def prop_map(code: int): PROP_PARTITION_LOSS_POLICY: PropPartitionLossPolicy, PROP_EAGER_TTL: PropEagerTTL, PROP_STATISTICS_ENABLED: PropStatisticsEnabled, + PROP_EXPIRY_POLICY: PropExpiryPolicy, }[code] @@ -285,6 +287,11 @@ class PropStatisticsEnabled(PropBase): prop_data_class = Bool +class PropExpiryPolicy(PropBase): + prop_code = PROP_EXPIRY_POLICY + prop_data_class = ExpiryPolicy + + class AnyProperty(PropBase): @classmethod diff --git a/pyignite/datatypes/expiry_policy.py 
b/pyignite/datatypes/expiry_policy.py new file mode 100644 index 0000000..4333ab1 --- /dev/null +++ b/pyignite/datatypes/expiry_policy.py @@ -0,0 +1,87 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import ctypes +from io import SEEK_CUR +from typing import Union + +import attr + +from pyignite.constants import PROTOCOL_BYTE_ORDER + + +def _positive(_, attrib, value): + if value < 0 and value not in [ExpiryPolicy.UNCHANGED, ExpiryPolicy.ETERNAL]: + raise ValueError(f"'{attrib.name}' value must not be negative") + + +def _write_duration(stream, value): + if isinstance(value, float): + value = int(value * 1000) + + stream.write(value.to_bytes(8, byteorder=PROTOCOL_BYTE_ORDER, signed=True)) + + +@attr.s +class ExpiryPolicy: + UNCHANGED = -2 + ETERNAL = -1 + + create = attr.ib(kw_only=True, default=UNCHANGED, + validator=[attr.validators.instance_of((int, float)), _positive]) + update = attr.ib(kw_only=True, default=UNCHANGED, type=Union[int, float], + validator=[attr.validators.instance_of((int, float)), _positive]) + access = attr.ib(kw_only=True, default=UNCHANGED, type=Union[int, float], + validator=[attr.validators.instance_of((int, float)), _positive]) + + class CType(ctypes.LittleEndianStructure): + _pack_ = 1 + _fields_ = [ + 
('not_null', ctypes.c_byte), + ('create', ctypes.c_longlong), + ('update', ctypes.c_longlong), + ('access', ctypes.c_longlong) + ] + + @classmethod + def parse(cls, stream): + init = stream.tell() + not_null = int.from_bytes(stream.slice(init, 1), byteorder=PROTOCOL_BYTE_ORDER) + if not_null: + stream.seek(ctypes.sizeof(ExpiryPolicy.CType), SEEK_CUR) + return ExpiryPolicy.CType + stream.seek(ctypes.sizeof(ctypes.c_byte), SEEK_CUR) + return ctypes.c_byte + + @classmethod + def to_python(cls, ctypes_object): + if ctypes_object == 0: + return None + + return ExpiryPolicy(create=ctypes_object.create, update=ctypes_object.update, access=ctypes_object.access) + + @classmethod + def from_python(cls, stream, value): + if not value: + stream.write(b'\x00') + return + + stream.write(b'\x01') + cls.write_policy(stream, value) + + @classmethod + def write_policy(cls, stream, value): + _write_duration(stream, value.create) + _write_duration(stream, value.update) + _write_duration(stream, value.access) diff --git a/pyignite/datatypes/prop_codes.py b/pyignite/datatypes/prop_codes.py index 72ffce1..9709313 100644 --- a/pyignite/datatypes/prop_codes.py +++ b/pyignite/datatypes/prop_codes.py @@ -47,3 +47,4 @@ PROP_PARTITION_LOSS_POLICY = 404 PROP_EAGER_TTL = 405 PROP_STATISTICS_ENABLED = 406 +PROP_EXPIRY_POLICY = 407 diff --git a/pyignite/queries/query.py b/pyignite/queries/query.py index 8dac64f..d971eef 100644 --- a/pyignite/queries/query.py +++ b/pyignite/queries/query.py @@ -21,7 +21,10 @@ from pyignite.api.result import APIResult from pyignite.connection import Connection, AioConnection -from pyignite.constants import MIN_LONG, MAX_LONG, RHF_TOPOLOGY_CHANGED +from pyignite.connection.protocol_context import ProtocolContext +from pyignite.constants import MIN_LONG, MAX_LONG, RHF_TOPOLOGY_CHANGED, PROTOCOL_BYTE_ORDER +from pyignite.datatypes import ExpiryPolicy +from pyignite.exceptions import NotSupportedByClusterError from pyignite.queries.response import Response from 
pyignite.stream import AioBinaryStream, BinaryStream, READ_BACKWARD @@ -44,6 +47,34 @@ def _internal(): return _internal() +@attr.s +class CacheInfo: + cache_id = attr.ib(kw_only=True, type=int) + expiry_policy = attr.ib(kw_only=True, type=ExpiryPolicy, default=None) + protocol_context = attr.ib(kw_only=True, type=ProtocolContext) + + @classmethod + async def from_python_async(cls, stream, value): + return cls.from_python(stream, value) + + @classmethod + def from_python(cls, stream, value): + cache_id = value.cache_id if value else 0 + expiry_policy = value.expiry_policy if value else None + flags = 0 + + stream.write(cache_id.to_bytes(4, byteorder=PROTOCOL_BYTE_ORDER, signed=True)) + + if expiry_policy: + if not value.protocol_context.is_expiry_policy_supported(): + raise NotSupportedByClusterError("'ExpiryPolicy' API is not supported by the cluster") + flags |= 0x04 + + stream.write(flags.to_bytes(1, byteorder=PROTOCOL_BYTE_ORDER)) + if expiry_policy: + ExpiryPolicy.write_policy(stream, expiry_policy) + + @attr.s class Query: op_code = attr.ib(type=int) diff --git a/tests/affinity/test_affinity.py b/tests/affinity/test_affinity.py index 64b9cc5..3097991 100644 --- a/tests/affinity/test_affinity.py +++ b/tests/affinity/test_affinity.py @@ -312,7 +312,7 @@ def inner(): best_node = client.get_best_node(cache, key, key_hint=key_hint) for node in filter(lambda n: n.alive, client._nodes): - result = cache_local_peek(node, cache.cache_id, key, key_hint=key_hint) + result = cache_local_peek(node, cache.cache_info, key, key_hint=key_hint) check_peek_value(node, best_node, result) @@ -321,7 +321,7 @@ async def inner_async(): best_node = await client.get_best_node(cache, key, key_hint=key_hint) for node in filter(lambda n: n.alive, client._nodes): - result = await cache_local_peek_async(node, cache.cache_id, key, key_hint=key_hint) + result = await cache_local_peek_async(node, cache.cache_info, key, key_hint=key_hint) check_peek_value(node, best_node, result) diff --git 
a/tests/common/test_expiry_policy.py b/tests/common/test_expiry_policy.py new file mode 100644 index 0000000..eb96536 --- /dev/null +++ b/tests/common/test_expiry_policy.py @@ -0,0 +1,151 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import asyncio +import time + +import pytest + +from pyignite.datatypes import ExpiryPolicy +from pyignite.datatypes.prop_codes import PROP_NAME, PROP_EXPIRY_POLICY + + +def test_expiry_policy(cache): + ttl, num_retries = 0.6, 10 + cache_eternal = cache.with_expire_policy(create=ExpiryPolicy.ETERNAL) + cache_created = cache.with_expire_policy(create=0.6) + cache_updated = cache.with_expire_policy(update=0.6) + cache_accessed = cache.with_expire_policy(access=0.6) + + for _ in range(num_retries): + cache.clear() + + start = time.time() + + cache_eternal.put(0, 0) + cache_created.put(1, 1) + cache_updated.put(2, 2) + cache_accessed.put(3, 3) + + time.sleep(ttl * 2 / 3) + + result = [cache.contains_key(k) for k in range(4)] + + if time.time() - start >= ttl: + continue + + assert all(result) + + start = time.time() + + cache_created.put(1, 2) # Check that update doesn't matter for created policy + cache_created.get(1) # Check that access doesn't matter for created policy + cache_updated.put(2, 3) # Check 
that update policy works. + cache_accessed.get(3) # Check that access policy works. + + time.sleep(ttl * 2 / 3) + + result = [cache.contains_key(k) for k in range(4)] + + if time.time() - start >= ttl: + continue + + assert result == [True, False, True, True] + + time.sleep(ttl * 2 / 3) + + cache_updated.get(2) # Check that access doesn't matter for updated policy. + + time.sleep(ttl * 2 / 3) + + result = [cache.contains_key(k) for k in range(0, 4)] + assert result == [True, False, False, False] + + +@pytest.mark.asyncio +async def test_expiry_policy_async(async_cache): + ttl, num_retries = 0.6, 10 + cache_eternal = async_cache.with_expire_policy(create=ExpiryPolicy.ETERNAL) + cache_created = async_cache.with_expire_policy(create=0.6) + cache_updated = async_cache.with_expire_policy(update=0.6) + cache_accessed = async_cache.with_expire_policy(access=0.6) + + for _ in range(num_retries): + await async_cache.clear() + + start = time.time() + + await asyncio.gather( + cache_eternal.put(0, 0), + cache_created.put(1, 1), + cache_updated.put(2, 2), + cache_accessed.put(3, 3) + ) + + await asyncio.sleep(ttl * 2 / 3) + + result = await asyncio.gather(*[async_cache.contains_key(k) for k in range(4)]) + + if time.time() - start >= ttl: + continue + + assert all(result) + + start = time.time() + + await asyncio.gather( + cache_created.put(1, 2), # Check that update doesn't matter for created policy + cache_created.get(1), # Check that access doesn't matter for created policy + cache_updated.put(2, 3), # Check that update policy works. + cache_accessed.get(3) # Check that access policy works. + ) + + await asyncio.sleep(ttl * 2 / 3) + + result = await asyncio.gather(*[async_cache.contains_key(k) for k in range(4)]) + + if time.time() - start >= ttl: + continue + + assert result == [True, False, True, True] + + await asyncio.sleep(ttl * 2 / 3) + + cache_updated.get(2) # Check that access doesn't matter for updated policy. 
+ + await asyncio.sleep(ttl * 2 / 3) + + result = await asyncio.gather(*[async_cache.contains_key(k) for k in range(4)]) + assert result == [True, False, False, False] + + +@pytest.mark.parametrize( + 'expiry_policy', + [ + None, + ExpiryPolicy(), + ExpiryPolicy(create=ExpiryPolicy.ETERNAL), + ExpiryPolicy(create=2000, update=4000, access=6000) + ] +) +def test_create_cache_with_expiry_policy(client, expiry_policy): + cache = client.create_cache({ + PROP_NAME: 'expiry_cache', + PROP_EXPIRY_POLICY: expiry_policy + }) + try: + settings = cache.settings + assert settings[PROP_EXPIRY_POLICY] == expiry_policy + finally: + cache.destroy() diff --git a/tests/custom/test_cluster.py b/tests/custom/test_cluster.py index e82e238..673d594 100644 --- a/tests/custom/test_cluster.py +++ b/tests/custom/test_cluster.py @@ -19,7 +19,7 @@ from pyignite.exceptions import CacheError from tests.util import clear_ignite_work_dir, start_ignite_gen -from pyignite.datatypes.cluster_state import ClusterState +from pyignite.datatypes import ClusterState @pytest.fixture(params=['with-persistence', 'without-persistence']) From aa82e234062750c3b01c2baa7d0fe4dcd26522c3 Mon Sep 17 00:00:00 2001 From: Ivan Daschinsky Date: Wed, 21 Apr 2021 22:17:53 +0300 Subject: [PATCH 2/6] IGNITE-14595 Rework tests --- pyignite/datatypes/expiry_policy.py | 12 ++++++++++++ tests/affinity/conftest.py | 7 +++---- tests/common/conftest.py | 11 +++++++++++ tests/common/test_binary.py | 4 ++-- tests/common/test_cache_config.py | 13 ++++++++++--- tests/common/test_expiry_policy.py | 24 ++++++++++++++++++++++-- tests/conftest.py | 1 + tests/custom/test_cluster.py | 9 +++++++++ 8 files changed, 70 insertions(+), 11 deletions(-) diff --git a/pyignite/datatypes/expiry_policy.py b/pyignite/datatypes/expiry_policy.py index 4333ab1..38344d5 100644 --- a/pyignite/datatypes/expiry_policy.py +++ b/pyignite/datatypes/expiry_policy.py @@ -64,6 +64,10 @@ def parse(cls, stream): stream.seek(ctypes.sizeof(ctypes.c_byte), SEEK_CUR) 
return ctypes.c_byte + @classmethod + async def parse_async(cls, stream): + return cls.parse(stream) + @classmethod def to_python(cls, ctypes_object): if ctypes_object == 0: @@ -71,6 +75,10 @@ def to_python(cls, ctypes_object): return ExpiryPolicy(create=ctypes_object.create, update=ctypes_object.update, access=ctypes_object.access) + @classmethod + async def to_python_async(cls, ctypes_object): + return cls.to_python(ctypes_object) + @classmethod def from_python(cls, stream, value): if not value: @@ -80,6 +88,10 @@ def from_python(cls, stream, value): stream.write(b'\x01') cls.write_policy(stream, value) + @classmethod + async def from_python_async(cls, stream, value): + return cls.from_python(stream, value) + @classmethod def write_policy(cls, stream, value): _write_duration(stream, value.create) diff --git a/tests/affinity/conftest.py b/tests/affinity/conftest.py index e23e0e6..da645c1 100644 --- a/tests/affinity/conftest.py +++ b/tests/affinity/conftest.py @@ -66,7 +66,6 @@ async def async_client(connection_param): @pytest.fixture(scope='module', autouse=True) def skip_if_no_affinity(request, server1): client = Client(partition_aware=True) - client.connect('127.0.0.1', 10801) - - if not client.partition_awareness_supported_by_protocol: - pytest.skip(f'skipped {request.node.name}, partition awareness is not supported.') + with client.connect('127.0.0.1', 10801): + if not client.partition_awareness_supported_by_protocol: + pytest.skip(f'skipped {request.node.name}, partition awareness is not supported.') diff --git a/tests/common/conftest.py b/tests/common/conftest.py index 243d822..0f28f7e 100644 --- a/tests/common/conftest.py +++ b/tests/common/conftest.py @@ -70,3 +70,14 @@ def cache(client): yield cache finally: cache.destroy() + + +@pytest.fixture(autouse=True) +def expiry_policy_supported(request, server1): + client = Client() + with client.connect('127.0.0.1', 10801): + result = client.protocol_context.is_expiry_policy_supported() + if not result and 
request.node.get_closest_marker('skip_if_no_expiry_policy'): + pytest.skip(f'skipped {request.node.name}, ExpiryPolicy API is not supported.') + + return result diff --git a/tests/common/test_binary.py b/tests/common/test_binary.py index 1d7192f..c94c4d5 100644 --- a/tests/common/test_binary.py +++ b/tests/common/test_binary.py @@ -74,7 +74,7 @@ def table_cache_read(client): cache = client.get_cache(table_cache_name) yield cache - cache.destroy() + client.sql(drop_query) @pytest.fixture @@ -87,7 +87,7 @@ async def table_cache_read_async(async_client): cache = await async_client.get_cache(table_cache_name) yield cache - await cache.destroy() + await async_client.sql(drop_query) def test_sql_read_as_binary(table_cache_read): diff --git a/tests/common/test_cache_config.py b/tests/common/test_cache_config.py index e68eef5..e5ed33c 100644 --- a/tests/common/test_cache_config.py +++ b/tests/common/test_cache_config.py @@ -28,7 +28,7 @@ PROP_PARTITION_LOSS_POLICY, PROP_EAGER_TTL, PROP_STATISTICS_ENABLED, PROP_REBALANCE_MODE, PROP_REBALANCE_DELAY, PROP_REBALANCE_TIMEOUT, PROP_REBALANCE_BATCH_SIZE, PROP_REBALANCE_BATCHES_PREFETCH_COUNT, PROP_REBALANCE_ORDER, PROP_REBALANCE_THROTTLE, PROP_QUERY_ENTITIES, PROP_QUERY_PARALLELISM, PROP_QUERY_DETAIL_METRIC_SIZE, - PROP_SQL_SCHEMA, PROP_SQL_INDEX_INLINE_MAX_SIZE, PROP_SQL_ESCAPE_ALL, PROP_MAX_QUERY_ITERATORS + PROP_SQL_SCHEMA, PROP_SQL_INDEX_INLINE_MAX_SIZE, PROP_SQL_ESCAPE_ALL, PROP_MAX_QUERY_ITERATORS, PROP_EXPIRY_POLICY ) from pyignite.exceptions import CacheError @@ -36,8 +36,8 @@ @pytest.fixture -def test_cache_settings(): - return { +def test_cache_settings(expiry_policy_supported): + settings = { PROP_NAME: cache_name, PROP_CACHE_MODE: CacheMode.PARTITIONED, PROP_CACHE_ATOMICITY_MODE: CacheAtomicityMode.TRANSACTIONAL, @@ -96,6 +96,13 @@ def test_cache_settings(): PROP_STATISTICS_ENABLED: True } + if expiry_policy_supported: + settings[PROP_EXPIRY_POLICY] = None + elif 'PROP_EXPIRY_POLICY' in ALL_PROPS: + del 
ALL_PROPS['PROP_EXPIRY_POLICY'] + + return settings + @pytest.fixture def cache(client): diff --git a/tests/common/test_expiry_policy.py b/tests/common/test_expiry_policy.py index eb96536..cc852c7 100644 --- a/tests/common/test_expiry_policy.py +++ b/tests/common/test_expiry_policy.py @@ -21,6 +21,7 @@ from pyignite.datatypes.prop_codes import PROP_NAME, PROP_EXPIRY_POLICY +@pytest.mark.skip_if_no_expiry_policy def test_expiry_policy(cache): ttl, num_retries = 0.6, 10 cache_eternal = cache.with_expire_policy(create=ExpiryPolicy.ETERNAL) @@ -74,6 +75,7 @@ def test_expiry_policy(cache): @pytest.mark.asyncio +@pytest.mark.skip_if_no_expiry_policy async def test_expiry_policy_async(async_cache): ttl, num_retries = 0.6, 10 cache_eternal = async_cache.with_expire_policy(create=ExpiryPolicy.ETERNAL) @@ -129,8 +131,7 @@ async def test_expiry_policy_async(async_cache): result = await asyncio.gather(*[async_cache.contains_key(k) for k in range(4)]) assert result == [True, False, False, False] - -@pytest.mark.parametrize( +create_cache_with_expiry_params = ( 'expiry_policy', [ None, @@ -139,6 +140,10 @@ async def test_expiry_policy_async(async_cache): ExpiryPolicy(create=2000, update=4000, access=6000) ] ) + + +@pytest.mark.parametrize(*create_cache_with_expiry_params) +@pytest.mark.skip_if_no_expiry_policy def test_create_cache_with_expiry_policy(client, expiry_policy): cache = client.create_cache({ PROP_NAME: 'expiry_cache', @@ -149,3 +154,18 @@ def test_create_cache_with_expiry_policy(client, expiry_policy): assert settings[PROP_EXPIRY_POLICY] == expiry_policy finally: cache.destroy() + + +@pytest.mark.parametrize(*create_cache_with_expiry_params) +@pytest.mark.skip_if_no_expiry_policy +@pytest.mark.asyncio +async def test_create_cache_with_expiry_policy_async(async_client, expiry_policy): + cache = await async_client.create_cache({ + PROP_NAME: 'expiry_cache', + PROP_EXPIRY_POLICY: expiry_policy + }) + try: + settings = await cache.settings() + assert 
settings[PROP_EXPIRY_POLICY] == expiry_policy + finally: + await cache.destroy() diff --git a/tests/conftest.py b/tests/conftest.py index 65134fd..1c65356 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -64,6 +64,7 @@ def pytest_addoption(parser): def pytest_configure(config): marker_docs = [ "skip_if_no_cext: mark test to run only if c extension is available", + "skip_if_no_expiry_policy: mark test to run only if expiry policy is supported by server", + "examples: mark test to run only if --examples are set" ] diff --git a/tests/custom/test_cluster.py b/tests/custom/test_cluster.py index 673d594..f1ffcfd 100644 --- a/tests/custom/test_cluster.py +++ b/tests/custom/test_cluster.py @@ -44,6 +44,15 @@ def server2(with_persistence, cleanup): yield from start_ignite_gen(idx=2, use_persistence=with_persistence) +@pytest.fixture(autouse=True) +def cluster_api_supported(request, server1): + client = Client() + client.connect('127.0.0.1', 10801) + + if not client.protocol_context.is_cluster_api_supported(): + pytest.skip(f'skipped {request.node.name}, Cluster API is not supported.') + + def test_cluster_set_active(with_persistence): key = 42 val = 42 From f398fca618ed626645b370c13b22a6d58a2c3c88 Mon Sep 17 00:00:00 2001 From: Ivan Daschinsky Date: Wed, 21 Apr 2021 22:20:34 +0300 Subject: [PATCH 3/6] IGNITE-14595 Wip --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 7a23147..8e009de 100644 --- a/README.md +++ b/README.md @@ -3,9 +3,9 @@ Apache Ignite thin (binary protocol) client, written in Python 3. ## Prerequisites -- Python 3.4 or above (3.6, 3.7 and 3.8 are tested), +- Python 3.6 or above (3.6, 3.7, 3.8 and 3.9 are tested), - Access to Apache Ignite node, local or remote. The current thin client - version was tested on Apache Ignite 2.7.0 (binary client protocol 1.2.0). + version was tested on Apache Ignite 2.10 (binary client protocol 1.7.0). 
## Installation From 3f1abb47ae4571859b6b6b11ef28124173e973ee Mon Sep 17 00:00:00 2001 From: Ivan Daschinsky Date: Wed, 21 Apr 2021 22:24:36 +0300 Subject: [PATCH 4/6] IGNITE-14595 Wip --- pyignite/cache.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pyignite/cache.py b/pyignite/cache.py index 0ca76cf..47b9413 100644 --- a/pyignite/cache.py +++ b/pyignite/cache.py @@ -17,7 +17,7 @@ from .datatypes import prop_codes, ExpiryPolicy from .datatypes.internal import AnyDataObject -from .exceptions import CacheCreationError, CacheError, ParameterError, SQLError +from .exceptions import CacheCreationError, CacheError, ParameterError, SQLError, NotSupportedByClusterError from .queries.query import CacheInfo from .utils import cache_id, status_to_exception from .api.cache_config import ( @@ -137,6 +137,9 @@ def with_expire_policy( self, expiry_policy: ExpiryPolicy = None, create: Union[int, float] = ExpiryPolicy.UNCHANGED, update: Union[int, float] = ExpiryPolicy.UNCHANGED, access: Union[int, float] = ExpiryPolicy.UNCHANGED ): + if not self.client.protocol_context.is_expiry_policy_supported(): + raise NotSupportedByClusterError("'ExpiryPolicy' API is not supported by the cluster") + cache_cls = type(self) if not expiry_policy: expiry_policy = ExpiryPolicy(create=create, update=update, access=access) From 3ff3281995c5986a5faff8eb3d522f0d4d3290e3 Mon Sep 17 00:00:00 2001 From: Ivan Daschinsky Date: Thu, 22 Apr 2021 13:56:51 +0300 Subject: [PATCH 5/6] IGNITE-14595 Add docs and examples --- docs/async_examples.rst | 31 +- docs/datatypes/cache_props.rst | 152 +++++---- docs/examples.rst | 32 +- docs/source/pyignite.aio_cluster.rst | 22 ++ docs/source/pyignite.cluster.rst | 22 ++ .../pyignite.datatypes.cluster_state.rst | 21 ++ .../pyignite.datatypes.expiry_policy.rst | 21 ++ docs/source/pyignite.datatypes.rst | 3 +- docs/source/pyignite.rst | 3 +- examples/expiry_policy.py | 110 ++++++ examples/failover.py | 45 ++- pyignite/aio_client.py | 4 +- 
pyignite/aio_cluster.py | 15 +- pyignite/api/cache_config.py | 15 +- pyignite/api/key_value.py | 314 +++++++++--------- pyignite/api/sql.py | 38 +-- pyignite/cache.py | 14 +- pyignite/client.py | 4 +- pyignite/cluster.py | 15 +- pyignite/datatypes/cluster_state.py | 4 + pyignite/datatypes/expiry_policy.py | 17 +- 21 files changed, 601 insertions(+), 301 deletions(-) create mode 100644 docs/source/pyignite.aio_cluster.rst create mode 100644 docs/source/pyignite.cluster.rst create mode 100644 docs/source/pyignite.datatypes.cluster_state.rst create mode 100644 docs/source/pyignite.datatypes.expiry_policy.rst create mode 100644 examples/expiry_policy.py diff --git a/docs/async_examples.rst b/docs/async_examples.rst index 363599a..f503b7c 100644 --- a/docs/async_examples.rst +++ b/docs/async_examples.rst @@ -48,14 +48,39 @@ that yields the resulting rows. .. literalinclude:: ../examples/async_key_value.py :language: python - :dedent: 4 + :dedent: 8 :lines: 39-50 +ExpiryPolicy +============ +File: `expiry_policy.py`_. -File: `async_sql.py`_. +You can enable expiry policy (TTL) by two approaches. + +Firstly, expiry policy can be set for entire cache by setting :py:attr:`~pyignite.datatypes.prop_codes.PROP_EXPIRY_POLICY` +in cache settings dictionary on creation. + +.. literalinclude:: ../examples/expiry_policy.py + :language: python + :dedent: 8 + :lines: 72-75 + +.. literalinclude:: ../examples/expiry_policy.py + :language: python + :dedent: 12 + :lines: 78-86 + +Secondly, expiry policy can be set for all cache operations, which are done under decorator. To create it use +:py:meth:`~pyignite.cache.BaseCache.with_expire_policy` + +.. literalinclude:: ../examples/expiry_policy.py + :language: python + :dedent: 12 + :lines: 93-102 SQL --- +File: `async_sql.py`_. First let us establish a connection. @@ -146,6 +171,6 @@ Finally, delete the tables used in this example with the following queries: - +.. 
_expiry_policy.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/expiry_policy.py .. _async_key_value.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/async_key_value.py .. _async_sql.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/async_sql.py \ No newline at end of file diff --git a/docs/datatypes/cache_props.rst b/docs/datatypes/cache_props.rst index 3cabbe6..380ccf2 100644 --- a/docs/datatypes/cache_props.rst +++ b/docs/datatypes/cache_props.rst @@ -26,78 +26,80 @@ Please refer to the `Apache Ignite Data Grid`_ documentation on cache synchronization, rebalance, affinity and other cache configuration-related matters. -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| Property | Ordinal | Property | Description | -| name | value | type | | -+=======================================+==========+==========+=======================================================+ -| Read/write cache properties, used to configure cache via :py:meth:`~pyignite.client.Client.create_cache` or | -| :py:meth:`~pyignite.client.Client.get_or_create_cache` of :py:class:`~pyignite.client.Client` | -| (:py:meth:`~pyignite.aio_client.AioClient.create_cache` or | -| :py:meth:`~pyignite.aio_client.AioClient.get_or_create_cache` of :py:class:`~pyignite.aio_client.AioClient`). | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_NAME | 0 | str | Cache name. This is the only *required* property. 
| -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_CACHE_MODE | 1 | int | Cache mode: LOCAL=0, REPLICATED=1, PARTITIONED=2 | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_CACHE_ATOMICITY_MODE | 2 | int | Cache atomicity mode: TRANSACTIONAL=0, ATOMIC=1 | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_BACKUPS_NUMBER | 3 | int | Number of backups | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_WRITE_SYNCHRONIZATION_MODE | 4 | int | Write synchronization mode: FULL_SYNC=0, | -| | | | FULL_ASYNC=1, PRIMARY_SYNC=2 | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_COPY_ON_READ | 5 | bool | Copy-on-read | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_READ_FROM_BACKUP | 6 | bool | Read from backup | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_DATA_REGION_NAME | 100 | str | Data region name | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_IS_ONHEAP_CACHE_ENABLED | 101 | bool | Is OnHeap cache enabled? 
| -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_QUERY_ENTITIES | 200 | list | A list of query entities (see `Query entity`_) | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_QUERY_PARALLELISM | 201 | int | Query parallelism | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_QUERY_DETAIL_METRIC_SIZE | 202 | int | Query detail metric size | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_SQL_SCHEMA | 203 | str | SQL schema | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_SQL_INDEX_INLINE_MAX_SIZE | 204 | int | SQL index inline maximum size | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_SQL_ESCAPE_ALL | 205 | bool | Turns on SQL escapes | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_MAX_QUERY_ITERATORS | 206 | int | Maximum number of query iterators | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_REBALANCE_MODE | 300 | int | Rebalance mode: SYNC=0, ASYNC=1, NONE=2 | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_REBALANCE_DELAY | 301 | int | Rebalance delay (ms) | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_REBALANCE_TIMEOUT | 302 | int | Rebalance timeout (ms) | 
-+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_REBALANCE_BATCH_SIZE | 303 | int | Rebalance batch size | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_REBALANCE_BATCHES_PREFETCH_COUNT | 304 | int | Rebalance batches prefetch count | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_REBALANCE_ORDER | 305 | int | Rebalance order | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_REBALANCE_THROTTLE | 306 | int | Rebalance throttle (ms) | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_GROUP_NAME | 400 | str | Group name | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_CACHE_KEY_CONFIGURATION | 401 | list | Cache key configuration (see `Cache key`_) | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_DEFAULT_LOCK_TIMEOUT | 402 | int | Default lock timeout (ms) | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_MAX_CONCURRENT_ASYNC_OPERATIONS | 403 | int | Maximum number of concurrent asynchronous operations | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_PARTITION_LOSS_POLICY | 404 | int | Partition loss policy: READ_ONLY_SAFE=0, | -| | | | READ_ONLY_ALL=1, READ_WRITE_SAFE=2, READ_WRITE_ALL=3, | -| | | | IGNORE=4 | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_EAGER_TTL | 
405 | bool | Eager TTL | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_STATISTICS_ENABLED | 406 | bool | Statistics enabled | -+---------------------------------------+----------+----------+-------------------------------------------------------+ ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| Property | Ordinal | Property | Description | +| name | value | type | | ++=======================================+==========+============================================================+=======================================================+ +| Read/write cache properties, used to configure cache via :py:meth:`~pyignite.client.Client.create_cache` or | +| :py:meth:`~pyignite.client.Client.get_or_create_cache` of :py:class:`~pyignite.client.Client` | +| (:py:meth:`~pyignite.aio_client.AioClient.create_cache` or | +| :py:meth:`~pyignite.aio_client.AioClient.get_or_create_cache` of :py:class:`~pyignite.aio_client.AioClient`). | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_NAME | 0 | str | Cache name. This is the only *required* property. 
| ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_CACHE_MODE | 1 | int | Cache mode: LOCAL=0, REPLICATED=1, PARTITIONED=2 | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_CACHE_ATOMICITY_MODE | 2 | int | Cache atomicity mode: TRANSACTIONAL=0, ATOMIC=1 | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_BACKUPS_NUMBER | 3 | int | Number of backups | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_WRITE_SYNCHRONIZATION_MODE | 4 | int | Write synchronization mode: FULL_SYNC=0, | +| | | | FULL_ASYNC=1, PRIMARY_SYNC=2 | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_COPY_ON_READ | 5 | bool | Copy-on-read | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_READ_FROM_BACKUP | 6 | bool | Read from backup | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_DATA_REGION_NAME | 100 | str | Data region name | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_IS_ONHEAP_CACHE_ENABLED | 101 | bool | Is OnHeap cache enabled? 
| ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_QUERY_ENTITIES | 200 | list | A list of query entities (see `Query entity`_) | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_QUERY_PARALLELISM | 201 | int | Query parallelism | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_QUERY_DETAIL_METRIC_SIZE | 202 | int | Query detail metric size | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_SQL_SCHEMA | 203 | str | SQL schema | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_SQL_INDEX_INLINE_MAX_SIZE | 204 | int | SQL index inline maximum size | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_SQL_ESCAPE_ALL | 205 | bool | Turns on SQL escapes | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_MAX_QUERY_ITERATORS | 206 | int | Maximum number of query iterators | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_REBALANCE_MODE | 300 | int | Rebalance mode: SYNC=0, ASYNC=1, NONE=2 | 
++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_REBALANCE_DELAY | 301 | int | Rebalance delay (ms) | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_REBALANCE_TIMEOUT | 302 | int | Rebalance timeout (ms) | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_REBALANCE_BATCH_SIZE | 303 | int | Rebalance batch size | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_REBALANCE_BATCHES_PREFETCH_COUNT | 304 | int | Rebalance batches prefetch count | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_REBALANCE_ORDER | 305 | int | Rebalance order | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_REBALANCE_THROTTLE | 306 | int | Rebalance throttle (ms) | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_GROUP_NAME | 400 | str | Group name | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_CACHE_KEY_CONFIGURATION | 401 | list | Cache key configuration (see `Cache key`_) | 
++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_DEFAULT_LOCK_TIMEOUT | 402 | int | Default lock timeout (ms) | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_MAX_CONCURRENT_ASYNC_OPERATIONS | 403 | int | Maximum number of concurrent asynchronous operations | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_PARTITION_LOSS_POLICY | 404 | int | Partition loss policy: READ_ONLY_SAFE=0, | +| | | | READ_ONLY_ALL=1, READ_WRITE_SAFE=2, READ_WRITE_ALL=3, | +| | | | IGNORE=4 | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_EAGER_TTL | 405 | bool | Eager TTL | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_STATISTICS_ENABLED | 406 | bool | Statistics enabled | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_EXPIRY_POLICY | 407 | :py:class:`~pyignite.datatypes.expiry_policy.ExpiryPolicy` | Set expiry policy (see `Expiry policy`_) | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ Query entity ------------ @@ -159,3 +161,9 @@ A dict of the following format: - `affinity_key_field_name`: name of the affinity key field. .. 
_Apache Ignite Data Grid: https://apacheignite.readme.io/docs/data-grid + +Expiry policy +------------- + +Set expiry policy to cache (see :py:class:`~pyignite.datatypes.expiry_policy.ExpiryPolicy`). If set to `None`, +expiry policy will not be set. diff --git a/docs/examples.rst b/docs/examples.rst index 0379330..bd65a84 100644 --- a/docs/examples.rst +++ b/docs/examples.rst @@ -85,6 +85,33 @@ As a rule of thumb: Refer the :ref:`data_types` section for the full list of parser/constructor classes you can use as type hints. +ExpiryPolicy +============ +File: `expiry_policy.py`_. + +You can enable expiry policy (TTL) by two approaches. + +Firstly, expiry policy can be set for entire cache by setting :py:attr:`~pyignite.datatypes.prop_codes.PROP_EXPIRY_POLICY` +in cache settings dictionary on creation. + +.. literalinclude:: ../examples/expiry_policy.py + :language: python + :dedent: 8 + :lines: 32-35 + +.. literalinclude:: ../examples/expiry_policy.py + :language: python + :dedent: 12 + :lines: 38-44 + +Secondly, expiry policy can be set for all cache operations, which are done under decorator. To create it use +:py:meth:`~pyignite.cache.BaseCache.with_expire_policy` + +.. literalinclude:: ../examples/expiry_policy.py + :language: python + :dedent: 12 + :lines: 51-58 + Scan ==== File: `scans.py`_. @@ -558,13 +585,13 @@ Gather 3 Ignite nodes on `localhost` into one cluster and run: .. literalinclude:: ../examples/failover.py :language: python - :lines: 16-53 + :lines: 16-52 Then try shutting down and restarting nodes, and see what happens. .. literalinclude:: ../examples/failover.py :language: python - :lines: 55-67 + :lines: 54-66 Client reconnection do not require an explicit user action, like calling a special method or resetting a parameter. @@ -683,6 +710,7 @@ with the following message: .. _type_hints.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/type_hints.py .. 
_failover.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/failover.py .. _scans.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/scans.py +.. _expiry_policy.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/expiry_policy.py .. _sql.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/sql.py .. _async_sql.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/async_sql.py .. _binary_basics.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/binary_basics.py diff --git a/docs/source/pyignite.aio_cluster.rst b/docs/source/pyignite.aio_cluster.rst new file mode 100644 index 0000000..ee2fa1b --- /dev/null +++ b/docs/source/pyignite.aio_cluster.rst @@ -0,0 +1,22 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +pyignite.aio_cluster module +=========================== + +.. automodule:: pyignite.aio_cluster + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/pyignite.cluster.rst b/docs/source/pyignite.cluster.rst new file mode 100644 index 0000000..cacdfb7 --- /dev/null +++ b/docs/source/pyignite.cluster.rst @@ -0,0 +1,22 @@ +.. 
Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +pyignite.cluster module +======================= + +.. automodule:: pyignite.cluster + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/pyignite.datatypes.cluster_state.rst b/docs/source/pyignite.datatypes.cluster_state.rst new file mode 100644 index 0000000..a1d7663 --- /dev/null +++ b/docs/source/pyignite.datatypes.cluster_state.rst @@ -0,0 +1,21 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +pyignite.datatypes.cluster_state module +======================================= + +.. automodule:: pyignite.datatypes.cluster_state + :members: + :show-inheritance: diff --git a/docs/source/pyignite.datatypes.expiry_policy.rst b/docs/source/pyignite.datatypes.expiry_policy.rst new file mode 100644 index 0000000..87d651e --- /dev/null +++ b/docs/source/pyignite.datatypes.expiry_policy.rst @@ -0,0 +1,21 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +pyignite.datatypes.expiry_policy module +======================================= + +.. 
automodule:: pyignite.datatypes.expiry_policy + :members: + :show-inheritance: diff --git a/docs/source/pyignite.datatypes.rst b/docs/source/pyignite.datatypes.rst index 269d500..70f7714 100644 --- a/docs/source/pyignite.datatypes.rst +++ b/docs/source/pyignite.datatypes.rst @@ -31,6 +31,8 @@ Submodules pyignite.datatypes.cache_config pyignite.datatypes.cache_properties pyignite.datatypes.complex + pyignite.datatypes.cluster_state + pyignite.datatypes.expiry_policy pyignite.datatypes.internal pyignite.datatypes.key_value pyignite.datatypes.null_object @@ -39,4 +41,3 @@ Submodules pyignite.datatypes.primitive_objects pyignite.datatypes.sql pyignite.datatypes.standard - diff --git a/docs/source/pyignite.rst b/docs/source/pyignite.rst index 85e31a8..c2a36fe 100644 --- a/docs/source/pyignite.rst +++ b/docs/source/pyignite.rst @@ -39,7 +39,8 @@ Submodules pyignite.aio_cache pyignite.client pyignite.aio_client - pyignite.constants + pyignite.cluster + pyignite.aio_cluster pyignite.cursors pyignite.exceptions diff --git a/examples/expiry_policy.py b/examples/expiry_policy.py new file mode 100644 index 0000000..6dd3eb4 --- /dev/null +++ b/examples/expiry_policy.py @@ -0,0 +1,110 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import asyncio +import time + +from pyignite import Client, AioClient +from pyignite.datatypes import ExpiryPolicy +from pyignite.datatypes.prop_codes import PROP_NAME, PROP_EXPIRY_POLICY + + +def main(): + print("Running sync ExpiryPolicy example.") + + client = Client() + with client.connect('127.0.0.1', 10800): + if not client.protocol_context.is_expiry_policy_supported(): + print("'ExpiryPolicy' is not supported by cluster.") + + print("Create cache with expiry policy.") + ttl_cache = client.create_cache({ + PROP_NAME: 'test', + PROP_EXPIRY_POLICY: ExpiryPolicy(create=1.0) + }) + + try: + ttl_cache.put(1, 1) + time.sleep(0.5) + print(f"key = {1}, value = {ttl_cache.get(1)}") + # key = 1, value = 1 + time.sleep(1.2) + print(f"key = {1}, value = {ttl_cache.get(1)}") + # key = 1, value = None + finally: + ttl_cache.destroy() + + print("Create simple Cache and set TTL through `with_expire_policy`") + simple_cache = client.create_cache('test') + try: + ttl_cache = simple_cache.with_expire_policy(access=1.0) + ttl_cache.put(1, 1) + time.sleep(0.5) + print(f"key = {1}, value = {ttl_cache.get(1)}") + # key = 1, value = 1 + time.sleep(1.7) + print(f"key = {1}, value = {ttl_cache.get(1)}") + # key = 1, value = None + finally: + simple_cache.destroy() + + +async def async_main(): + print("Running async ExpiryPolicy example.") + + client = AioClient() + async with client.connect('127.0.0.1', 10800): + if not client.protocol_context.is_expiry_policy_supported(): + print("'ExpiryPolicy' is not supported by cluster.") + + print("Create cache with expiry policy.") + ttl_cache = await client.create_cache({ + PROP_NAME: 'test', + PROP_EXPIRY_POLICY: ExpiryPolicy(create=1.0) + }) + + try: + await ttl_cache.put(1, 1) + await asyncio.sleep(0.5) + value = await ttl_cache.get(1) + print(f"key = {1}, value = {value}") + # key = 1, value = 1 + await asyncio.sleep(1.2) + value = await ttl_cache.get(1) + print(f"key = {1}, value = {value}") + # key = 1, value = None + finally: + await 
ttl_cache.destroy() + + print("Create simple Cache and set TTL through `with_expire_policy`") + simple_cache = await client.create_cache('test') + try: + ttl_cache = simple_cache.with_expire_policy(access=1.0) + await ttl_cache.put(1, 1) + await asyncio.sleep(0.5) + value = await ttl_cache.get(1) + print(f"key = {1}, value = {value}") + # key = 1, value = 1 + await asyncio.sleep(1.7) + value = await ttl_cache.get(1) + print(f"key = {1}, value = {value}") + # key = 1, value = None + finally: + await simple_cache.destroy() + +if __name__ == '__main__': + main() + + loop = asyncio.get_event_loop() + loop.run_until_complete(async_main()) diff --git a/examples/failover.py b/examples/failover.py index 21ab547..3a5ee42 100644 --- a/examples/failover.py +++ b/examples/failover.py @@ -26,31 +26,30 @@ ] -def main(): - client = Client(timeout=4.0) - with client.connect(nodes): - print('Connected') +client = Client(timeout=4.0) +with client.connect(nodes): + print('Connected') - my_cache = client.get_or_create_cache({ - PROP_NAME: 'my_cache', - PROP_CACHE_MODE: CacheMode.PARTITIONED, - PROP_BACKUPS_NUMBER: 2, - }) - my_cache.put('test_key', 0) - test_value = 0 + my_cache = client.get_or_create_cache({ + PROP_NAME: 'my_cache', + PROP_CACHE_MODE: CacheMode.PARTITIONED, + PROP_BACKUPS_NUMBER: 2, + }) + my_cache.put('test_key', 0) + test_value = 0 - # abstract main loop - while True: - try: - # do the work - test_value = my_cache.get('test_key') or 0 - my_cache.put('test_key', test_value + 1) - except (OSError, SocketError) as e: - # recover from error (repeat last command, check data - # consistency or just continue − depends on the task) - print(f'Error: {e}') - print(f'Last value: {test_value}') - print('Reconnecting') + # abstract main loop + while True: + try: + # do the work + test_value = my_cache.get('test_key') or 0 + my_cache.put('test_key', test_value + 1) + except (OSError, SocketError) as e: + # recover from error (repeat last command, check data + # consistency or 
just continue − depends on the task) + print(f'Error: {e}') + print(f'Last value: {test_value}') + print('Reconnecting') # Connected # Error: Connection broken. diff --git a/pyignite/aio_client.py b/pyignite/aio_client.py index 40fd722..b0498f7 100644 --- a/pyignite/aio_client.py +++ b/pyignite/aio_client.py @@ -469,8 +469,8 @@ def sql( def get_cluster(self) -> 'AioCluster': """ - Gets client cluster facade. + Get client cluster facade. - :return: AioClient cluster facade. + :return: :py:class:`~pyignite.aio_cluster.AioCluster` instance. """ return AioCluster(self) diff --git a/pyignite/aio_cluster.py b/pyignite/aio_cluster.py index 08d1f34..afbc41b 100644 --- a/pyignite/aio_cluster.py +++ b/pyignite/aio_cluster.py @@ -31,6 +31,9 @@ class AioCluster: """ def __init__(self, client: 'AioClient'): + """ + :param client: :py:class:`~pyignite.aio_client.AioClient` instance. + """ self._client = client @status_to_exception(ClusterError) @@ -38,8 +41,10 @@ async def get_state(self) -> 'ClusterState': """ Gets current cluster state. - :return: Current cluster state. This is one of ClusterState.INACTIVE, - ClusterState.ACTIVE or ClusterState.ACTIVE_READ_ONLY. + :return: Current cluster state. This is one of + :py:attr:`~pyignite.datatypes.cluster_state.ClusterState.INACTIVE`, + :py:attr:`~pyignite.datatypes.cluster_state.ClusterState.ACTIVE`, + :py:attr:`~pyignite.datatypes.cluster_state.ClusterState.ACTIVE_READ_ONLY`. """ return await cluster_get_state_async(await self._client.random_node()) @@ -51,7 +56,9 @@ async def set_state(self, state: 'ClusterState'): Note: Deactivation clears in-memory caches (without persistence) including the system caches. - :param state: New cluster state. This is one of ClusterState.INACTIVE, - ClusterState.ACTIVE or ClusterState.ACTIVE_READ_ONLY. + :param state: New cluster state. 
This is one of + :py:attr:`~pyignite.datatypes.cluster_state.ClusterState.INACTIVE`, + :py:attr:`~pyignite.datatypes.cluster_state.ClusterState.ACTIVE`, + :py:attr:`~pyignite.datatypes.cluster_state.ClusterState.ACTIVE_READ_ONLY`. """ return await cluster_set_state_async(await self._client.random_node(), state) diff --git a/pyignite/api/cache_config.py b/pyignite/api/cache_config.py index 511c18d..7f2869b 100644 --- a/pyignite/api/cache_config.py +++ b/pyignite/api/cache_config.py @@ -60,26 +60,27 @@ def compact_cache_config(cache_config: dict) -> dict: return result -def cache_get_configuration(connection: 'Connection', cache: CacheInfo, query_id=None) -> 'APIResult': +def cache_get_configuration(connection: 'Connection', cache_info: CacheInfo, query_id=None) -> 'APIResult': """ Gets configuration for the given cache. :param connection: connection to Ignite server, - :param cache: Cache meta info, + :param cache_info: cache meta info, :param query_id: (optional) a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value is generated, :return: API result data object. Result value is OrderedDict with the cache configuration parameters. """ - return __cache_get_configuration(connection, cache, query_id) + return __cache_get_configuration(connection, cache_info, query_id) -async def cache_get_configuration_async(connection: 'AioConnection', cache: CacheInfo, query_id=None) -> 'APIResult': +async def cache_get_configuration_async( + connection: 'AioConnection', cache_info: CacheInfo, query_id=None) -> 'APIResult': """ Async version of cache_get_configuration. 
""" - return await __cache_get_configuration(connection, cache, query_id) + return await __cache_get_configuration(connection, cache_info, query_id) def __post_process_cache_config(result): @@ -88,7 +89,7 @@ def __post_process_cache_config(result): return result -def __cache_get_configuration(connection, cache, query_id): +def __cache_get_configuration(connection, cache_info, query_id): query_struct = Query( OP_CACHE_GET_CONFIGURATION, [ @@ -98,7 +99,7 @@ def __cache_get_configuration(connection, cache, query_id): ) return query_perform(query_struct, connection, query_params={ - 'cache_info': cache + 'cache_info': cache_info }, response_config=[ ('cache_config', get_cache_config_struct(connection.protocol_context)) diff --git a/pyignite/api/key_value.py b/pyignite/api/key_value.py index 6fe8183..5038051 100644 --- a/pyignite/api/key_value.py +++ b/pyignite/api/key_value.py @@ -31,14 +31,14 @@ from ..queries.query import CacheInfo -def cache_put(connection: 'Connection', cache: CacheInfo, key: Any, value: Any, +def cache_put(connection: 'Connection', cache_info: CacheInfo, key: Any, value: Any, key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, query_id: Optional[int] = None) -> 'APIResult': """ Puts a value with a given key to cache (overwriting existing value if any). :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param key: key for the cache entry. Can be of any supported type, :param value: value for the key, :param key_hint: (optional) Ignite data type, for which the given key @@ -51,19 +51,19 @@ def cache_put(connection: 'Connection', cache: CacheInfo, key: Any, value: Any, :return: API result data object. Contains zero status if a value is written, non-zero status and an error description otherwise. 
""" - return __cache_put(connection, cache, key, value, key_hint, value_hint, query_id) + return __cache_put(connection, cache_info, key, value, key_hint, value_hint, query_id) -async def cache_put_async(connection: 'AioConnection', cache: CacheInfo, key: Any, value: Any, +async def cache_put_async(connection: 'AioConnection', cache_info: CacheInfo, key: Any, value: Any, key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, query_id: Optional[int] = None) -> 'APIResult': """ Async version of cache_put """ - return await __cache_put(connection, cache, key, value, key_hint, value_hint, query_id) + return await __cache_put(connection, cache_info, key, value, key_hint, value_hint, query_id) -def __cache_put(connection, cache, key, value, key_hint, value_hint, query_id): +def __cache_put(connection, cache_info, key, value, key_hint, value_hint, query_id): query_struct = Query( OP_CACHE_PUT, [ @@ -76,20 +76,20 @@ def __cache_put(connection, cache, key, value, key_hint, value_hint, query_id): return query_perform( query_struct, connection, query_params={ - 'cache_info': cache, + 'cache_info': cache_info, 'key': key, 'value': value } ) -def cache_get(connection: 'Connection', cache: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None, +def cache_get(connection: 'Connection', cache_info: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None, query_id: Optional[int] = None) -> 'APIResult': """ Retrieves a value from cache by key. :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param key: key for the cache entry. Can be of any supported type, :param key_hint: (optional) Ignite data type, for which the given key should be converted, @@ -99,18 +99,18 @@ def cache_get(connection: 'Connection', cache: CacheInfo, key: Any, key_hint: 'I :return: API result data object. Contains zero status and a value retrieved on success, non-zero status and an error description on failure. 
""" - return __cache_get(connection, cache, key, key_hint, query_id) + return __cache_get(connection, cache_info, key, key_hint, query_id) -async def cache_get_async(connection: 'AioConnection', cache: CacheInfo, key: Any, +async def cache_get_async(connection: 'AioConnection', cache_info: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None, query_id: Optional[int] = None) -> 'APIResult': """ Async version of cache_get """ - return await __cache_get(connection, cache, key, key_hint, query_id) + return await __cache_get(connection, cache_info, key, key_hint, query_id) -def __cache_get(connection, cache, key, key_hint, query_id): +def __cache_get(connection, cache_info, key, key_hint, query_id): query_struct = Query( OP_CACHE_GET, [ @@ -122,7 +122,7 @@ def __cache_get(connection, cache, key, key_hint, query_id): return query_perform( query_struct, connection, query_params={ - 'cache_info': cache, + 'cache_info': cache_info, 'key': key, }, response_config=[ @@ -132,13 +132,13 @@ def __cache_get(connection, cache, key, key_hint, query_id): ) -def cache_get_all(connection: 'Connection', cache: CacheInfo, keys: Iterable, +def cache_get_all(connection: 'Connection', cache_info: CacheInfo, keys: Iterable, query_id: Optional[int] = None) -> 'APIResult': """ Retrieves multiple key-value pairs from cache. :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param keys: list of keys or tuples of (key, key_hint), :param query_id: (optional) a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value @@ -147,18 +147,18 @@ def cache_get_all(connection: 'Connection', cache: CacheInfo, keys: Iterable, retrieved key-value pairs, non-zero status and an error description on failure. 
""" - return __cache_get_all(connection, cache, keys, query_id) + return __cache_get_all(connection, cache_info, keys, query_id) -async def cache_get_all_async(connection: 'AioConnection', cache: CacheInfo, keys: Iterable, +async def cache_get_all_async(connection: 'AioConnection', cache_info: CacheInfo, keys: Iterable, query_id: Optional[int] = None) -> 'APIResult': """ Async version of cache_get_all. """ - return await __cache_get_all(connection, cache, keys, query_id) + return await __cache_get_all(connection, cache_info, keys, query_id) -def __cache_get_all(connection, cache, keys, query_id): +def __cache_get_all(connection, cache_info, keys, query_id): query_struct = Query( OP_CACHE_GET_ALL, [ @@ -170,7 +170,7 @@ def __cache_get_all(connection, cache, keys, query_id): return query_perform( query_struct, connection, query_params={ - 'cache_info': cache, + 'cache_info': cache_info, 'keys': keys, }, response_config=[ @@ -180,14 +180,14 @@ def __cache_get_all(connection, cache, keys, query_id): ) -def cache_put_all(connection: 'Connection', cache: CacheInfo, pairs: dict, +def cache_put_all(connection: 'Connection', cache_info: CacheInfo, pairs: dict, query_id: Optional[int] = None) -> 'APIResult': """ Puts multiple key-value pairs to cache (overwriting existing associations if any). :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param pairs: dictionary type parameters, contains key-value pairs to save. Each key or value can be an item of representable Python type or a tuple of (item, hint), @@ -197,18 +197,18 @@ def cache_put_all(connection: 'Connection', cache: CacheInfo, pairs: dict, :return: API result data object. Contains zero status if key-value pairs are written, non-zero status and an error description otherwise. 
""" - return __cache_put_all(connection, cache, pairs, query_id) + return __cache_put_all(connection, cache_info, pairs, query_id) -async def cache_put_all_async(connection: 'AioConnection', cache: CacheInfo, pairs: dict, +async def cache_put_all_async(connection: 'AioConnection', cache_info: CacheInfo, pairs: dict, query_id: Optional[int] = None) -> 'APIResult': """ Async version of cache_put_all. """ - return await __cache_put_all(connection, cache, pairs, query_id) + return await __cache_put_all(connection, cache_info, pairs, query_id) -def __cache_put_all(connection, cache, pairs, query_id): +def __cache_put_all(connection, cache_info, pairs, query_id): query_struct = Query( OP_CACHE_PUT_ALL, [ @@ -220,19 +220,19 @@ def __cache_put_all(connection, cache, pairs, query_id): return query_perform( query_struct, connection, query_params={ - 'cache_info': cache, + 'cache_info': cache_info, 'data': pairs, }, ) -def cache_contains_key(connection: 'Connection', cache: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None, +def cache_contains_key(connection: 'Connection', cache_info: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None, query_id: Optional[int] = None) -> 'APIResult': """ Returns a value indicating whether given key is present in cache. :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param key: key for the cache entry. Can be of any supported type, :param key_hint: (optional) Ignite data type, for which the given key should be converted, @@ -243,18 +243,18 @@ def cache_contains_key(connection: 'Connection', cache: CacheInfo, key: Any, key retrieved on success: `True` when key is present, `False` otherwise, non-zero status and an error description on failure. 
""" - return __cache_contains_key(connection, cache, key, key_hint, query_id) + return __cache_contains_key(connection, cache_info, key, key_hint, query_id) -async def cache_contains_key_async(connection: 'AioConnection', cache: CacheInfo, key: Any, +async def cache_contains_key_async(connection: 'AioConnection', cache_info: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None, query_id: Optional[int] = None) -> 'APIResult': """ Async version of cache_contains_key. """ - return await __cache_contains_key(connection, cache, key, key_hint, query_id) + return await __cache_contains_key(connection, cache_info, key, key_hint, query_id) -def __cache_contains_key(connection, cache, key, key_hint, query_id): +def __cache_contains_key(connection, cache_info, key, key_hint, query_id): query_struct = Query( OP_CACHE_CONTAINS_KEY, [ @@ -266,7 +266,7 @@ def __cache_contains_key(connection, cache, key, key_hint, query_id): return query_perform( query_struct, connection, query_params={ - 'cache_info': cache, + 'cache_info': cache_info, 'key': key, }, response_config=[ @@ -276,13 +276,13 @@ def __cache_contains_key(connection, cache, key, key_hint, query_id): ) -def cache_contains_keys(connection: 'Connection', cache: CacheInfo, keys: Iterable, +def cache_contains_keys(connection: 'Connection', cache_info: CacheInfo, keys: Iterable, query_id: Optional[int] = None) -> 'APIResult': """ Returns a value indicating whether all given keys are present in cache. :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param keys: a list of keys or (key, type hint) tuples, :param query_id: a value generated by client and returned as-is in response.query_id. 
When the parameter is omitted, a random value
@@ -291,18 +291,18 @@ def cache_contains_keys(connection: 'Connection', cache: CacheInfo, keys: Iterab
     retrieved on success: `True` when all keys are present, `False` otherwise,
     non-zero status and an error description on failure.
     """
-    return __cache_contains_keys(connection, cache, keys, query_id)
+    return __cache_contains_keys(connection, cache_info, keys, query_id)
 
 
-async def cache_contains_keys_async(connection: 'AioConnection', cache: CacheInfo, keys: Iterable,
+async def cache_contains_keys_async(connection: 'AioConnection', cache_info: CacheInfo, keys: Iterable,
                                     query_id: Optional[int] = None) -> 'APIResult':
     """
     Async version of cache_contains_keys.
     """
-    return await __cache_contains_keys(connection, cache, keys, query_id)
+    return await __cache_contains_keys(connection, cache_info, keys, query_id)
 
 
-def __cache_contains_keys(connection, cache, keys, query_id):
+def __cache_contains_keys(connection, cache_info, keys, query_id):
     query_struct = Query(
         OP_CACHE_CONTAINS_KEYS,
         [
@@ -314,7 +314,7 @@ def __cache_contains_keys(connection, cache, keys, query_id):
     return query_perform(
         query_struct, connection,
         query_params={
-            'cache_info': cache,
+            'cache_info': cache_info,
             'keys': keys,
         },
         response_config=[
@@ -324,15 +324,15 @@ def __cache_contains_keys(connection, cache, keys, query_id):
     )
 
 
-def cache_get_and_put(connection: 'Connection', cache: CacheInfo, key: Any, value: Any,
+def cache_get_and_put(connection: 'Connection', cache_info: CacheInfo, key: Any, value: Any,
                       key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None,
                       query_id: Optional[int] = None) -> 'APIResult':
     """
-    Puts a value with a given key to cache, and returns the previous value
+    Puts a value with a given key to cache, and returns the previous value
     for that key, or null value if there was not such key.
:param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param key: key for the cache entry. Can be of any supported type, :param value: value for the key, :param key_hint: (optional) Ignite data type, for which the given key @@ -346,19 +346,19 @@ def cache_get_and_put(connection: 'Connection', cache: CacheInfo, key: Any, valu or None if a value is written, non-zero status and an error description in case of error. """ - return __cache_get_and_put(connection, cache, key, value, key_hint, value_hint, query_id) + return __cache_get_and_put(connection, cache_info, key, value, key_hint, value_hint, query_id) -async def cache_get_and_put_async(connection: 'AioConnection', cache: CacheInfo, key: Any, value: Any, +async def cache_get_and_put_async(connection: 'AioConnection', cache_info: CacheInfo, key: Any, value: Any, key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, query_id: Optional[int] = None) -> 'APIResult': """ Async version of cache_get_and_put. 
""" - return await __cache_get_and_put(connection, cache, key, value, key_hint, value_hint, query_id) + return await __cache_get_and_put(connection, cache_info, key, value, key_hint, value_hint, query_id) -def __cache_get_and_put(connection, cache, key, value, key_hint, value_hint, query_id): +def __cache_get_and_put(connection, cache_info, key, value, key_hint, value_hint, query_id): query_struct = Query( OP_CACHE_GET_AND_PUT, [ @@ -371,7 +371,7 @@ def __cache_get_and_put(connection, cache, key, value, key_hint, value_hint, que return query_perform( query_struct, connection, query_params={ - 'cache_info': cache, + 'cache_info': cache_info, 'key': key, 'value': value, }, @@ -382,7 +382,7 @@ def __cache_get_and_put(connection, cache, key, value, key_hint, value_hint, que ) -def cache_get_and_replace(connection: 'Connection', cache: CacheInfo, key: Any, value: Any, +def cache_get_and_replace(connection: 'Connection', cache_info: CacheInfo, key: Any, value: Any, key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, query_id: Optional[int] = None) -> 'APIResult': """ @@ -391,7 +391,7 @@ def cache_get_and_replace(connection: 'Connection', cache: CacheInfo, key: Any, for that key. :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param key: key for the cache entry. Can be of any supported type, :param value: value for the key, :param key_hint: (optional) Ignite data type, for which the given key @@ -404,19 +404,19 @@ def cache_get_and_replace(connection: 'Connection', cache: CacheInfo, key: Any, :return: API result data object. Contains zero status and an old value or None on success, non-zero status and an error description otherwise. 
""" - return __cache_get_and_replace(connection, cache, key, key_hint, value, value_hint, query_id) + return __cache_get_and_replace(connection, cache_info, key, key_hint, value, value_hint, query_id) -async def cache_get_and_replace_async(connection: 'AioConnection', cache: CacheInfo, key: Any, value: Any, +async def cache_get_and_replace_async(connection: 'AioConnection', cache_info: CacheInfo, key: Any, value: Any, key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, query_id: Optional[int] = None) -> 'APIResult': """ Async version of cache_get_and_replace. """ - return await __cache_get_and_replace(connection, cache, key, key_hint, value, value_hint, query_id) + return await __cache_get_and_replace(connection, cache_info, key, key_hint, value, value_hint, query_id) -def __cache_get_and_replace(connection, cache, key, key_hint, value, value_hint, query_id): +def __cache_get_and_replace(connection, cache_info, key, key_hint, value, value_hint, query_id): query_struct = Query( OP_CACHE_GET_AND_REPLACE, [ ('cache_info', CacheInfo), @@ -428,7 +428,7 @@ def __cache_get_and_replace(connection, cache, key, key_hint, value, value_hint, return query_perform( query_struct, connection, query_params={ - 'cache_info': cache, + 'cache_info': cache_info, 'key': key, 'value': value, }, @@ -439,13 +439,13 @@ def __cache_get_and_replace(connection, cache, key, key_hint, value, value_hint, ) -def cache_get_and_remove(connection: 'Connection', cache: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None, +def cache_get_and_remove(connection: 'Connection', cache_info: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None, query_id: Optional[int] = None) -> 'APIResult': """ Removes the cache entry with specified key, returning the value. :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param key: key for the cache entry. 
Can be of any supported type, :param key_hint: (optional) Ignite data type, for which the given key should be converted, @@ -455,16 +455,16 @@ def cache_get_and_remove(connection: 'Connection', cache: CacheInfo, key: Any, k :return: API result data object. Contains zero status and an old value or None, non-zero status and an error description otherwise. """ - return __cache_get_and_remove(connection, cache, key, key_hint, query_id) + return __cache_get_and_remove(connection, cache_info, key, key_hint, query_id) async def cache_get_and_remove_async( - connection: 'AioConnection', cache: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None, + connection: 'AioConnection', cache_info: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None, query_id: Optional[int] = None) -> 'APIResult': - return await __cache_get_and_remove(connection, cache, key, key_hint, query_id) + return await __cache_get_and_remove(connection, cache_info, key, key_hint, query_id) -def __cache_get_and_remove(connection, cache, key, key_hint, query_id): +def __cache_get_and_remove(connection, cache_info, key, key_hint, query_id): query_struct = Query( OP_CACHE_GET_AND_REMOVE, [ ('cache_info', CacheInfo), @@ -475,7 +475,7 @@ def __cache_get_and_remove(connection, cache, key, key_hint, query_id): return query_perform( query_struct, connection, query_params={ - 'cache_info': cache, + 'cache_info': cache_info, 'key': key, }, response_config=[ @@ -485,7 +485,7 @@ def __cache_get_and_remove(connection, cache, key, key_hint, query_id): ) -def cache_put_if_absent(connection: 'Connection', cache: CacheInfo, key: Any, value: Any, +def cache_put_if_absent(connection: 'Connection', cache_info: CacheInfo, key: Any, value: Any, key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, query_id: Optional[int] = None) -> 'APIResult': """ @@ -493,7 +493,7 @@ def cache_put_if_absent(connection: 'Connection', cache: CacheInfo, key: Any, va does not already exist. 
:param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param key: key for the cache entry. Can be of any supported type, :param value: value for the key, :param key_hint: (optional) Ignite data type, for which the given key @@ -506,19 +506,19 @@ def cache_put_if_absent(connection: 'Connection', cache: CacheInfo, key: Any, va :return: API result data object. Contains zero status on success, non-zero status and an error description otherwise. """ - return __cache_put_if_absent(connection, cache, key, value, key_hint, value_hint, query_id) + return __cache_put_if_absent(connection, cache_info, key, value, key_hint, value_hint, query_id) -async def cache_put_if_absent_async(connection: 'AioConnection', cache: CacheInfo, key: Any, value: Any, +async def cache_put_if_absent_async(connection: 'AioConnection', cache_info: CacheInfo, key: Any, value: Any, key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, query_id: Optional[int] = None) -> 'APIResult': """ Async version of cache_put_if_absent. 
""" - return await __cache_put_if_absent(connection, cache, key, value, key_hint, value_hint, query_id) + return await __cache_put_if_absent(connection, cache_info, key, value, key_hint, value_hint, query_id) -def __cache_put_if_absent(connection, cache, key, value, key_hint, value_hint, query_id): +def __cache_put_if_absent(connection, cache_info, key, value, key_hint, value_hint, query_id): query_struct = Query( OP_CACHE_PUT_IF_ABSENT, [ @@ -531,7 +531,7 @@ def __cache_put_if_absent(connection, cache, key, value, key_hint, value_hint, q return query_perform( query_struct, connection, query_params={ - 'cache_info': cache, + 'cache_info': cache_info, 'key': key, 'value': value, }, @@ -542,7 +542,7 @@ def __cache_put_if_absent(connection, cache, key, value, key_hint, value_hint, q ) -def cache_get_and_put_if_absent(connection: 'Connection', cache: CacheInfo, key: Any, value: Any, +def cache_get_and_put_if_absent(connection: 'Connection', cache_info: CacheInfo, key: Any, value: Any, key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, query_id: Optional[int] = None) -> 'APIResult': """ @@ -550,7 +550,7 @@ def cache_get_and_put_if_absent(connection: 'Connection', cache: CacheInfo, key: already exist. :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param key: key for the cache entry. Can be of any supported type, :param value: value for the key, :param key_hint: (optional) Ignite data type, for which the given key @@ -563,19 +563,19 @@ def cache_get_and_put_if_absent(connection: 'Connection', cache: CacheInfo, key: :return: API result data object. Contains zero status and an old value or None on success, non-zero status and an error description otherwise. 
""" - return __cache_get_and_put_if_absent(connection, cache, key, value, key_hint, value_hint, query_id) + return __cache_get_and_put_if_absent(connection, cache_info, key, value, key_hint, value_hint, query_id) -async def cache_get_and_put_if_absent_async(connection: 'AioConnection', cache: CacheInfo, key: Any, value: Any, +async def cache_get_and_put_if_absent_async(connection: 'AioConnection', cache_info: CacheInfo, key: Any, value: Any, key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, query_id: Optional[int] = None) -> 'APIResult': """ Async version of cache_get_and_put_if_absent. """ - return await __cache_get_and_put_if_absent(connection, cache, key, value, key_hint, value_hint, query_id) + return await __cache_get_and_put_if_absent(connection, cache_info, key, value, key_hint, value_hint, query_id) -def __cache_get_and_put_if_absent(connection, cache, key, value, key_hint, value_hint, query_id): +def __cache_get_and_put_if_absent(connection, cache_info, key, value, key_hint, value_hint, query_id): query_struct = Query( OP_CACHE_GET_AND_PUT_IF_ABSENT, [ @@ -588,7 +588,7 @@ def __cache_get_and_put_if_absent(connection, cache, key, value, key_hint, value return query_perform( query_struct, connection, query_params={ - 'cache_info': cache, + 'cache_info': cache_info, 'key': key, 'value': value, }, @@ -599,14 +599,14 @@ def __cache_get_and_put_if_absent(connection, cache, key, value, key_hint, value ) -def cache_replace(connection: 'Connection', cache: CacheInfo, key: Any, value: Any, +def cache_replace(connection: 'Connection', cache_info: CacheInfo, key: Any, value: Any, key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, query_id: Optional[int] = None) -> 'APIResult': """ Puts a value with a given key to cache only if the key already exist. :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param key: key for the cache entry. 
Can be of any supported type, :param value: value for the key, :param key_hint: (optional) Ignite data type, for which the given key @@ -620,19 +620,19 @@ def cache_replace(connection: 'Connection', cache: CacheInfo, key: Any, value: A success code, or non-zero status and an error description if something has gone wrong. """ - return __cache_replace(connection, cache, key, value, key_hint, value_hint, query_id) + return __cache_replace(connection, cache_info, key, value, key_hint, value_hint, query_id) -async def cache_replace_async(connection: 'AioConnection', cache: CacheInfo, key: Any, value: Any, +async def cache_replace_async(connection: 'AioConnection', cache_info: CacheInfo, key: Any, value: Any, key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, query_id: Optional[int] = None) -> 'APIResult': """ Async version of cache_replace. """ - return await __cache_replace(connection, cache, key, value, key_hint, value_hint, query_id) + return await __cache_replace(connection, cache_info, key, value, key_hint, value_hint, query_id) -def __cache_replace(connection, cache, key, value, key_hint, value_hint, query_id): +def __cache_replace(connection, cache_info, key, value, key_hint, value_hint, query_id): query_struct = Query( OP_CACHE_REPLACE, [ @@ -645,7 +645,7 @@ def __cache_replace(connection, cache, key, value, key_hint, value_hint, query_i return query_perform( query_struct, connection, query_params={ - 'cache_info': cache, + 'cache_info': cache_info, 'key': key, 'value': value, }, @@ -656,7 +656,7 @@ def __cache_replace(connection, cache, key, value, key_hint, value_hint, query_i ) -def cache_replace_if_equals(connection: 'Connection', cache: CacheInfo, key: Any, sample: Any, value: Any, +def cache_replace_if_equals(connection: 'Connection', cache_info: CacheInfo, key: Any, sample: Any, value: Any, key_hint: 'IgniteDataType' = None, sample_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, query_id: Optional[int] = None) -> 
'APIResult': """ @@ -664,7 +664,7 @@ def cache_replace_if_equals(connection: 'Connection', cache: CacheInfo, key: Any and value equals provided sample. :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param key: key for the cache entry, :param sample: a sample to compare the stored value with, :param value: new value for the given key, @@ -681,22 +681,22 @@ def cache_replace_if_equals(connection: 'Connection', cache: CacheInfo, key: Any success code, or non-zero status and an error description if something has gone wrong. """ - return __cache_replace_if_equals(connection, cache, key, sample, value, key_hint, + return __cache_replace_if_equals(connection, cache_info, key, sample, value, key_hint, sample_hint, value_hint, query_id) async def cache_replace_if_equals_async( - connection: 'AioConnection', cache: CacheInfo, key: Any, sample: Any, value: Any, + connection: 'AioConnection', cache_info: CacheInfo, key: Any, sample: Any, value: Any, key_hint: 'IgniteDataType' = None, sample_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, query_id: Optional[int] = None) -> 'APIResult': """ Async version of cache_replace_if_equals. 
""" - return await __cache_replace_if_equals(connection, cache, key, sample, value, key_hint, + return await __cache_replace_if_equals(connection, cache_info, key, sample, value, key_hint, sample_hint, value_hint, query_id) -def __cache_replace_if_equals(connection, cache, key, sample, value, key_hint, sample_hint, value_hint, query_id): +def __cache_replace_if_equals(connection, cache_info, key, sample, value, key_hint, sample_hint, value_hint, query_id): query_struct = Query( OP_CACHE_REPLACE_IF_EQUALS, [ @@ -710,7 +710,7 @@ def __cache_replace_if_equals(connection, cache, key, sample, value, key_hint, s return query_perform( query_struct, connection, query_params={ - 'cache_info': cache, + 'cache_info': cache_info, 'key': key, 'sample': sample, 'value': value, @@ -722,30 +722,30 @@ def __cache_replace_if_equals(connection, cache, key, sample, value, key_hint, s ) -def cache_clear(connection: 'Connection', cache: CacheInfo, query_id: Optional[int] = None) -> 'APIResult': +def cache_clear(connection: 'Connection', cache_info: CacheInfo, query_id: Optional[int] = None) -> 'APIResult': """ Clears the cache without notifying listeners or cache writers. :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param query_id: (optional) a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value is generated, :return: API result data object. Contains zero status on success, non-zero status and an error description otherwise. """ - return __cache_clear(connection, cache, query_id) + return __cache_clear(connection, cache_info, query_id) async def cache_clear_async( - connection: 'AioConnection', cache: CacheInfo, query_id: Optional[int] = None) -> 'APIResult': + connection: 'AioConnection', cache_info: CacheInfo, query_id: Optional[int] = None) -> 'APIResult': """ Async version of cache_clear. 
""" - return await __cache_clear(connection, cache, query_id) + return await __cache_clear(connection, cache_info, query_id) -def __cache_clear(connection, cache, query_id): +def __cache_clear(connection, cache_info, query_id): query_struct = Query( OP_CACHE_CLEAR, [ @@ -756,18 +756,18 @@ def __cache_clear(connection, cache, query_id): return query_perform( query_struct, connection, query_params={ - 'cache_info': cache, + 'cache_info': cache_info, }, ) -def cache_clear_key(connection: 'Connection', cache: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None, +def cache_clear_key(connection: 'Connection', cache_info: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None, query_id: Optional[int] = None) -> 'APIResult': """ Clears the cache key without notifying listeners or cache writers. :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param key: key for the cache entry, :param key_hint: (optional) Ignite data type, for which the given key should be converted, @@ -777,18 +777,18 @@ def cache_clear_key(connection: 'Connection', cache: CacheInfo, key: Any, key_hi :return: API result data object. Contains zero status on success, non-zero status and an error description otherwise. """ - return __cache_clear_key(connection, cache, key, key_hint, query_id) + return __cache_clear_key(connection, cache_info, key, key_hint, query_id) -async def cache_clear_key_async(connection: 'AioConnection', cache: CacheInfo, key: Any, +async def cache_clear_key_async(connection: 'AioConnection', cache_info: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None, query_id: Optional[int] = None) -> 'APIResult': """ Async version of cache_clear_key. 
""" - return await __cache_clear_key(connection, cache, key, key_hint, query_id) + return await __cache_clear_key(connection, cache_info, key, key_hint, query_id) -def __cache_clear_key(connection, cache, key, key_hint, query_id): +def __cache_clear_key(connection, cache_info, key, key_hint, query_id): query_struct = Query( OP_CACHE_CLEAR_KEY, [ @@ -800,19 +800,19 @@ def __cache_clear_key(connection, cache, key, key_hint, query_id): return query_perform( query_struct, connection, query_params={ - 'cache_info': cache, + 'cache_info': cache_info, 'key': key, }, ) def cache_clear_keys( - connection: 'Connection', cache: CacheInfo, keys: Iterable, query_id: Optional[int] = None) -> 'APIResult': + connection: 'Connection', cache_info: CacheInfo, keys: Iterable, query_id: Optional[int] = None) -> 'APIResult': """ Clears the cache keys without notifying listeners or cache writers. :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param keys: list of keys or tuples of (key, key_hint), :param query_id: (optional) a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value @@ -820,18 +820,19 @@ def cache_clear_keys( :return: API result data object. Contains zero status on success, non-zero status and an error description otherwise. """ - return __cache_clear_keys(connection, cache, keys, query_id) + return __cache_clear_keys(connection, cache_info, keys, query_id) async def cache_clear_keys_async( - connection: 'AioConnection', cache: CacheInfo, keys: Iterable, query_id: Optional[int] = None) -> 'APIResult': + connection: 'AioConnection', cache_info: CacheInfo, keys: Iterable, query_id: Optional[int] = None +) -> 'APIResult': """ Async version of cache_clear_keys. 
""" - return await __cache_clear_keys(connection, cache, keys, query_id) + return await __cache_clear_keys(connection, cache_info, keys, query_id) -def __cache_clear_keys(connection, cache, keys, query_id): +def __cache_clear_keys(connection, cache_info, keys, query_id): query_struct = Query( OP_CACHE_CLEAR_KEYS, [ @@ -843,19 +844,19 @@ def __cache_clear_keys(connection, cache, keys, query_id): return query_perform( query_struct, connection, query_params={ - 'cache_info': cache, + 'cache_info': cache_info, 'keys': keys, }, ) -def cache_remove_key(connection: 'Connection', cache: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None, +def cache_remove_key(connection: 'Connection', cache_info: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None, query_id: Optional[int] = None) -> 'APIResult': """ Clears the cache key without notifying listeners or cache writers. :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param key: key for the cache entry, :param key_hint: (optional) Ignite data type, for which the given key should be converted, @@ -866,18 +867,18 @@ def cache_remove_key(connection: 'Connection', cache: CacheInfo, key: Any, key_h success code, or non-zero status and an error description if something has gone wrong. """ - return __cache_remove_key(connection, cache, key, key_hint, query_id) + return __cache_remove_key(connection, cache_info, key, key_hint, query_id) -async def cache_remove_key_async(connection: 'AioConnection', cache: CacheInfo, key: Any, +async def cache_remove_key_async(connection: 'AioConnection', cache_info: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None, query_id: Optional[int] = None) -> 'APIResult': """ Async version of cache_remove_key. 
""" - return await __cache_remove_key(connection, cache, key, key_hint, query_id) + return await __cache_remove_key(connection, cache_info, key, key_hint, query_id) -def __cache_remove_key(connection, cache, key, key_hint, query_id): +def __cache_remove_key(connection, cache_info, key, key_hint, query_id): query_struct = Query( OP_CACHE_REMOVE_KEY, [ @@ -889,7 +890,7 @@ def __cache_remove_key(connection, cache, key, key_hint, query_id): return query_perform( query_struct, connection, query_params={ - 'cache_info': cache, + 'cache_info': cache_info, 'key': key, }, response_config=[ @@ -899,7 +900,7 @@ def __cache_remove_key(connection, cache, key, key_hint, query_id): ) -def cache_remove_if_equals(connection: 'Connection', cache: CacheInfo, key: Any, sample: Any, +def cache_remove_if_equals(connection: 'Connection', cache_info: CacheInfo, key: Any, sample: Any, key_hint: 'IgniteDataType' = None, sample_hint: 'IgniteDataType' = None, query_id: Optional[int] = None) -> 'APIResult': """ @@ -907,7 +908,7 @@ def cache_remove_if_equals(connection: 'Connection', cache: CacheInfo, key: Any, actual value, notifying listeners and cache writers. :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param key: key for the cache entry, :param sample: a sample to compare the stored value with, :param key_hint: (optional) Ignite data type, for which the given key @@ -921,19 +922,19 @@ def cache_remove_if_equals(connection: 'Connection', cache: CacheInfo, key: Any, success code, or non-zero status and an error description if something has gone wrong. 
""" - return __cache_remove_if_equals(connection, cache, key, sample, key_hint, sample_hint, query_id) + return __cache_remove_if_equals(connection, cache_info, key, sample, key_hint, sample_hint, query_id) async def cache_remove_if_equals_async( - connection: 'AioConnection', cache: CacheInfo, key: Any, sample: Any, key_hint: 'IgniteDataType' = None, + connection: 'AioConnection', cache_info: CacheInfo, key: Any, sample: Any, key_hint: 'IgniteDataType' = None, sample_hint: 'IgniteDataType' = None, query_id: Optional[int] = None) -> 'APIResult': """ Async version of cache_remove_if_equals. """ - return await __cache_remove_if_equals(connection, cache, key, sample, key_hint, sample_hint, query_id) + return await __cache_remove_if_equals(connection, cache_info, key, sample, key_hint, sample_hint, query_id) -def __cache_remove_if_equals(connection, cache, key, sample, key_hint, sample_hint, query_id): +def __cache_remove_if_equals(connection, cache_info, key, sample, key_hint, sample_hint, query_id): query_struct = Query( OP_CACHE_REMOVE_IF_EQUALS, [ @@ -946,7 +947,7 @@ def __cache_remove_if_equals(connection, cache, key, sample, key_hint, sample_hi return query_perform( query_struct, connection, query_params={ - 'cache_info': cache, + 'cache_info': cache_info, 'key': key, 'sample': sample, }, @@ -958,12 +959,12 @@ def __cache_remove_if_equals(connection, cache, key, sample, key_hint, sample_hi def cache_remove_keys( - connection: 'Connection', cache: CacheInfo, keys: Iterable, query_id: Optional[int] = None) -> 'APIResult': + connection: 'Connection', cache_info: CacheInfo, keys: Iterable, query_id: Optional[int] = None) -> 'APIResult': """ Removes entries with given keys, notifying listeners and cache writers. 
:param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param keys: list of keys or tuples of (key, key_hint), :param query_id: (optional) a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value @@ -971,18 +972,19 @@ def cache_remove_keys( :return: API result data object. Contains zero status on success, non-zero status and an error description otherwise. """ - return __cache_remove_keys(connection, cache, keys, query_id) + return __cache_remove_keys(connection, cache_info, keys, query_id) async def cache_remove_keys_async( - connection: 'AioConnection', cache: CacheInfo, keys: Iterable, query_id: Optional[int] = None) -> 'APIResult': + connection: 'AioConnection', cache_info: CacheInfo, keys: Iterable, query_id: Optional[int] = None +) -> 'APIResult': """ Async version of cache_remove_keys. """ - return await __cache_remove_keys(connection, cache, keys, query_id) + return await __cache_remove_keys(connection, cache_info, keys, query_id) -def __cache_remove_keys(connection, cache, keys, query_id): +def __cache_remove_keys(connection, cache_info, keys, query_id): query_struct = Query( OP_CACHE_REMOVE_KEYS, [ @@ -994,36 +996,36 @@ def __cache_remove_keys(connection, cache, keys, query_id): return query_perform( query_struct, connection, query_params={ - 'cache_info': cache, + 'cache_info': cache_info, 'keys': keys, }, ) -def cache_remove_all(connection: 'Connection', cache: CacheInfo, query_id: Optional[int] = None) -> 'APIResult': +def cache_remove_all(connection: 'Connection', cache_info: CacheInfo, query_id: Optional[int] = None) -> 'APIResult': """ - Removes all entries from cache, notifying listeners and cache writers. + Removes all entries from the cache, notifying listeners and cache writers. 
:param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param query_id: (optional) a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value is generated, :return: API result data object. Contains zero status on success, non-zero status and an error description otherwise. """ - return __cache_remove_all(connection, cache, query_id) + return __cache_remove_all(connection, cache_info, query_id) async def cache_remove_all_async( - connection: 'AioConnection', cache: CacheInfo, query_id: Optional[int] = None) -> 'APIResult': + connection: 'AioConnection', cache_info: CacheInfo, query_id: Optional[int] = None) -> 'APIResult': """ Async version of cache_remove_all. """ - return await __cache_remove_all(connection, cache, query_id) + return await __cache_remove_all(connection, cache_info, query_id) -def __cache_remove_all(connection, cache, query_id): +def __cache_remove_all(connection, cache_info, query_id): query_struct = Query( OP_CACHE_REMOVE_ALL, [ @@ -1034,18 +1036,18 @@ def __cache_remove_all(connection, cache, query_id): return query_perform( query_struct, connection, query_params={ - 'cache_info': cache, + 'cache_info': cache_info, }, ) -def cache_get_size(connection: 'Connection', cache: CacheInfo, peek_modes: Union[int, list, tuple] = None, +def cache_get_size(connection: 'Connection', cache_info: CacheInfo, peek_modes: Union[int, list, tuple] = None, query_id: Optional[int] = None) -> 'APIResult': """ Gets the number of entries in cache. :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param peek_modes: (optional) limit count to near cache partition (PeekModes.NEAR), primary cache (PeekModes.PRIMARY), or backup cache (PeekModes.BACKUP). 
Defaults to pimary cache partitions (PeekModes.PRIMARY), @@ -1056,17 +1058,17 @@ def cache_get_size(connection: 'Connection', cache: CacheInfo, peek_modes: Union cache entries on success, non-zero status and an error description otherwise. """ - return __cache_get_size(connection, cache, peek_modes, query_id) + return __cache_get_size(connection, cache_info, peek_modes, query_id) async def cache_get_size_async( - connection: 'AioConnection', cache: CacheInfo, peek_modes: Union[int, list, tuple] = None, + connection: 'AioConnection', cache_info: CacheInfo, peek_modes: Union[int, list, tuple] = None, query_id: Optional[int] = None ) -> 'APIResult': - return await __cache_get_size(connection, cache, peek_modes, query_id) + return await __cache_get_size(connection, cache_info, peek_modes, query_id) -def __cache_get_size(connection, cache, peek_modes, query_id): +def __cache_get_size(connection, cache_info, peek_modes, query_id): if peek_modes is None: peek_modes = [] elif not isinstance(peek_modes, (list, tuple)): @@ -1083,7 +1085,7 @@ def __cache_get_size(connection, cache, peek_modes, query_id): return query_perform( query_struct, connection, query_params={ - 'cache_info': cache, + 'cache_info': cache_info, 'peek_modes': peek_modes, }, response_config=[ @@ -1093,7 +1095,7 @@ def __cache_get_size(connection, cache, peek_modes, query_id): ) -def cache_local_peek(conn: 'Connection', cache: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None, +def cache_local_peek(conn: 'Connection', cache_info: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None, peek_modes: Union[int, list, tuple] = None, query_id: Optional[int] = None) -> 'APIResult': """ Peeks at in-memory cached value using default optional peek mode. @@ -1102,7 +1104,7 @@ def cache_local_peek(conn: 'Connection', cache: CacheInfo, key: Any, key_hint: ' node. 
:param conn: connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param key: entry key, :param key_hint: (optional) Ignite data type, for which the given key should be converted, @@ -1115,20 +1117,20 @@ def cache_local_peek(conn: 'Connection', cache: CacheInfo, key: Any, key_hint: ' :return: API result data object. Contains zero status and a peeked value (null if not found). """ - return __cache_local_peek(conn, cache, key, key_hint, peek_modes, query_id) + return __cache_local_peek(conn, cache_info, key, key_hint, peek_modes, query_id) async def cache_local_peek_async( - conn: 'AioConnection', cache: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None, + conn: 'AioConnection', cache_info: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None, peek_modes: Union[int, list, tuple] = None, query_id: Optional[int] = None ) -> 'APIResult': """ Async version of cache_local_peek. """ - return await __cache_local_peek(conn, cache, key, key_hint, peek_modes, query_id) + return await __cache_local_peek(conn, cache_info, key, key_hint, peek_modes, query_id) -def __cache_local_peek(conn, cache, key, key_hint, peek_modes, query_id): +def __cache_local_peek(conn, cache_info, key, key_hint, peek_modes, query_id): if peek_modes is None: peek_modes = [] elif not isinstance(peek_modes, (list, tuple)): @@ -1146,7 +1148,7 @@ def __cache_local_peek(conn, cache, key, key_hint, peek_modes, query_id): return query_perform( query_struct, conn, query_params={ - 'cache_info': cache, + 'cache_info': cache_info, 'key': key, 'peek_modes': peek_modes, }, diff --git a/pyignite/api/sql.py b/pyignite/api/sql.py index 6f66347..267bc5b 100644 --- a/pyignite/api/sql.py +++ b/pyignite/api/sql.py @@ -27,13 +27,13 @@ from ..queries.response import SQLResponse -def scan(conn: 'Connection', cache: CacheInfo, page_size: int, partitions: int = -1, local: bool = False, +def scan(conn: 'Connection', cache_info: CacheInfo, page_size: int, 
partitions: int = -1, local: bool = False, query_id: int = None) -> APIResult: """ Performs scan query. :param conn: connection to Ignite server, - :param cache: Cache meta info. + :param cache_info: cache meta info. :param page_size: cursor page size, :param partitions: (optional) number of partitions to query (negative to query entire cache), @@ -53,15 +53,15 @@ def scan(conn: 'Connection', cache: CacheInfo, page_size: int, partitions: int = * `more`: bool, True if more data is available for subsequent ‘scan_cursor_get_page’ calls. """ - return __scan(conn, cache, page_size, partitions, local, query_id) + return __scan(conn, cache_info, page_size, partitions, local, query_id) -async def scan_async(conn: 'AioConnection', cache: CacheInfo, page_size: int, partitions: int = -1, +async def scan_async(conn: 'AioConnection', cache_info: CacheInfo, page_size: int, partitions: int = -1, local: bool = False, query_id: int = None) -> APIResult: """ Async version of scan. """ - return await __scan(conn, cache, page_size, partitions, local, query_id) + return await __scan(conn, cache_info, page_size, partitions, local, query_id) def __query_result_post_process(result): @@ -70,7 +70,7 @@ def __query_result_post_process(result): return result -def __scan(conn, cache, page_size, partitions, local, query_id): +def __scan(conn, cache_info, page_size, partitions, local, query_id): query_struct = Query( OP_QUERY_SCAN, [ @@ -85,7 +85,7 @@ def __scan(conn, cache, page_size, partitions, local, query_id): return query_perform( query_struct, conn, query_params={ - 'cache_info': cache, + 'cache_info': cache_info, 'filter': None, 'page_size': page_size, 'partitions': partitions, @@ -151,7 +151,7 @@ def __scan_cursor_get_page(conn, cursor, query_id): @deprecated(version='1.2.0', reason="This API is deprecated and will be removed in the following major release. 
" "Use sql_fields instead") def sql( - conn: 'Connection', cache: CacheInfo, + conn: 'Connection', cache_info: CacheInfo, table_name: str, query_str: str, page_size: int, query_args=None, distributed_joins: bool = False, replicated_only: bool = False, local: bool = False, timeout: int = 0, query_id: int = None @@ -161,7 +161,7 @@ def sql( the whole record (key and value). :param conn: connection to Ignite server, - :param cache: Cache meta info, + :param cache_info: Cache meta info, :param table_name: name of a type or SQL table, :param query_str: SQL query string, :param page_size: cursor page size, @@ -209,7 +209,7 @@ def sql( result = query_struct.perform( conn, query_params={ - 'cache_info': cache, + 'cache_info': cache_info, 'table_name': table_name, 'query_str': query_str, 'query_args': query_args, @@ -277,7 +277,7 @@ def sql_cursor_get_page( def sql_fields( - conn: 'Connection', cache: CacheInfo, + conn: 'Connection', cache_info: CacheInfo, query_str: str, page_size: int, query_args=None, schema: str = None, statement_type: int = StatementType.ANY, distributed_joins: bool = False, local: bool = False, replicated_only: bool = False, @@ -289,7 +289,7 @@ def sql_fields( Performs SQL fields query. :param conn: connection to Ignite server, - :param cache: Cache meta info. + :param cache_info: cache meta info. :param query_str: SQL query string, :param page_size: cursor page size, :param query_args: (optional) query arguments. List of values or @@ -327,13 +327,13 @@ def sql_fields( * `more`: bool, True if more data is available for subsequent ‘sql_fields_cursor_get_page’ calls. 
""" - return __sql_fields(conn, cache, query_str, page_size, query_args, schema, statement_type, distributed_joins, + return __sql_fields(conn, cache_info, query_str, page_size, query_args, schema, statement_type, distributed_joins, local, replicated_only, enforce_join_order, collocated, lazy, include_field_names, max_rows, timeout, query_id) async def sql_fields_async( - conn: 'AioConnection', cache: CacheInfo, + conn: 'AioConnection', cache_info: CacheInfo, query_str: str, page_size: int, query_args=None, schema: str = None, statement_type: int = StatementType.ANY, distributed_joins: bool = False, local: bool = False, replicated_only: bool = False, @@ -344,13 +344,13 @@ async def sql_fields_async( """ Async version of sql_fields. """ - return await __sql_fields(conn, cache, query_str, page_size, query_args, schema, statement_type, distributed_joins, - local, replicated_only, enforce_join_order, collocated, lazy, include_field_names, - max_rows, timeout, query_id) + return await __sql_fields(conn, cache_info, query_str, page_size, query_args, schema, statement_type, + distributed_joins, local, replicated_only, enforce_join_order, collocated, lazy, + include_field_names, max_rows, timeout, query_id) def __sql_fields( - conn, cache, query_str, page_size, query_args, schema, statement_type, distributed_joins, local, + conn, cache_info, query_str, page_size, query_args, schema, statement_type, distributed_joins, local, replicated_only, enforce_join_order, collocated, lazy, include_field_names, max_rows, timeout, query_id ): if query_args is None: @@ -382,7 +382,7 @@ def __sql_fields( return query_perform( query_struct, conn, query_params={ - 'cache_info': cache, + 'cache_info': cache_info, 'schema': schema, 'page_size': page_size, 'max_rows': max_rows, diff --git a/pyignite/cache.py b/pyignite/cache.py index 47b9413..a2444a4 100644 --- a/pyignite/cache.py +++ b/pyignite/cache.py @@ -134,9 +134,19 @@ def cache_id(self) -> int: return self._cache_info.cache_id def 
with_expire_policy( - self, expiry_policy: ExpiryPolicy = None, create: Union[int, float] = ExpiryPolicy.UNCHANGED, - update: Union[int, float] = ExpiryPolicy.UNCHANGED, access: Union[int, float] = ExpiryPolicy.UNCHANGED + self, expiry_policy: Optional[ExpiryPolicy] = None, + create: Union[int, float] = ExpiryPolicy.UNCHANGED, + update: Union[int, float] = ExpiryPolicy.UNCHANGED, + access: Union[int, float] = ExpiryPolicy.UNCHANGED ): + """ + :param expiry_policy: optional :class:`~pyignite.datatypes.expiry_policy.ExpiryPolicy` + object. If it is set, other params will be ignored. + :param create: create TTL in seconds (float) or milliseconds (int), + :param update: update TTL in seconds (float) or milliseconds (int), + :param access: access TTL in seconds (float) or milliseconds (int). + :return: cache decorator with expiry policy set. + """ if not self.client.protocol_context.is_expiry_policy_supported(): raise NotSupportedByClusterError("'ExpiryPolicy' API is not supported by the cluster") diff --git a/pyignite/client.py b/pyignite/client.py index 44a69da..099b44d 100644 --- a/pyignite/client.py +++ b/pyignite/client.py @@ -736,8 +736,8 @@ def sql( def get_cluster(self) -> 'Cluster': """ - Gets client cluster facade. + Get client cluster facade. - :return: Client cluster facade. + :return: :py:class:`~pyignite.cluster.Cluster` instance. """ return Cluster(self) diff --git a/pyignite/cluster.py b/pyignite/cluster.py index 5aefcc4..d953b5c 100644 --- a/pyignite/cluster.py +++ b/pyignite/cluster.py @@ -31,6 +31,9 @@ class Cluster: """ def __init__(self, client: 'Client'): + """ + :param client: :py:class:`~pyignite.client.Client` instance. + """ self._client = client @status_to_exception(ClusterError) @@ -38,8 +41,10 @@ def get_state(self) -> 'ClusterState': """ Gets current cluster state. - :return: Current cluster state. This is one of ClusterState.INACTIVE, - ClusterState.ACTIVE or ClusterState.ACTIVE_READ_ONLY. + :return: Current cluster state. 
This is one of + :py:attr:`~pyignite.datatypes.cluster_state.ClusterState.INACTIVE`, + :py:attr:`~pyignite.datatypes.cluster_state.ClusterState.ACTIVE`, + :py:attr:`~pyignite.datatypes.cluster_state.ClusterState.ACTIVE_READ_ONLY`. """ return cluster_get_state(self._client.random_node) @@ -51,7 +56,9 @@ def set_state(self, state: 'ClusterState'): Note: Deactivation clears in-memory caches (without persistence) including the system caches. - :param state: New cluster state. This is one of ClusterState.INACTIVE, - ClusterState.ACTIVE or ClusterState.ACTIVE_READ_ONLY. + :param state: New cluster state. This is one of + :py:attr:`~pyignite.datatypes.cluster_state.ClusterState.INACTIVE`, + :py:attr:`~pyignite.datatypes.cluster_state.ClusterState.ACTIVE`, + :py:attr:`~pyignite.datatypes.cluster_state.ClusterState.ACTIVE_READ_ONLY`. """ return cluster_set_state(self._client.random_node, state) diff --git a/pyignite/datatypes/cluster_state.py b/pyignite/datatypes/cluster_state.py index 863a1d2..def5591 100644 --- a/pyignite/datatypes/cluster_state.py +++ b/pyignite/datatypes/cluster_state.py @@ -17,6 +17,10 @@ class ClusterState(IntEnum): + """ + Cluster states. + """ + #: Cluster deactivated. Cache operations aren't allowed. INACTIVE = 0 diff --git a/pyignite/datatypes/expiry_policy.py b/pyignite/datatypes/expiry_policy.py index 38344d5..3572754 100644 --- a/pyignite/datatypes/expiry_policy.py +++ b/pyignite/datatypes/expiry_policy.py @@ -35,17 +35,28 @@ def _write_duration(stream, value): @attr.s class ExpiryPolicy: + """ + Set expiry policy for the cache. + """ + #: Set TTL unchanged. UNCHANGED = -2 + + #: Set TTL eternal. 
ETERNAL = -1 + #: Set TTL for create in seconds(float) or millis(int) create = attr.ib(kw_only=True, default=UNCHANGED, validator=[attr.validators.instance_of((int, float)), _positive]) + + #: Set TTL for update in seconds(float) or millis(int) update = attr.ib(kw_only=True, default=UNCHANGED, type=Union[int, float], validator=[attr.validators.instance_of((int, float)), _positive]) + + #: Set TTL for access in seconds(float) or millis(int) access = attr.ib(kw_only=True, default=UNCHANGED, type=Union[int, float], validator=[attr.validators.instance_of((int, float)), _positive]) - class CType(ctypes.LittleEndianStructure): + class _CType(ctypes.LittleEndianStructure): _pack_ = 1 _fields_ = [ ('not_null', ctypes.c_byte), @@ -59,8 +70,8 @@ def parse(cls, stream): init = stream.tell() not_null = int.from_bytes(stream.slice(init, 1), byteorder=PROTOCOL_BYTE_ORDER) if not_null: - stream.seek(ctypes.sizeof(ExpiryPolicy.CType), SEEK_CUR) - return ExpiryPolicy.CType + stream.seek(ctypes.sizeof(ExpiryPolicy._CType), SEEK_CUR) + return ExpiryPolicy._CType stream.seek(ctypes.sizeof(ctypes.c_byte), SEEK_CUR) return ctypes.c_byte From a83b5a8e3273880a0c29808ff5a14a76a0336d9e Mon Sep 17 00:00:00 2001 From: Ivan Daschinsky Date: Thu, 22 Apr 2021 21:56:40 +0300 Subject: [PATCH 6/6] IGNITE-14595 Code review fixes. --- docs/async_examples.rst | 6 +++--- docs/examples.rst | 8 ++++---- examples/expiry_policy.py | 31 +++++++++++++++++-------------- 3 files changed, 24 insertions(+), 21 deletions(-) diff --git a/docs/async_examples.rst b/docs/async_examples.rst index f503b7c..4bc21ae 100644 --- a/docs/async_examples.rst +++ b/docs/async_examples.rst @@ -62,13 +62,13 @@ in cache settings dictionary on creation. .. literalinclude:: ../examples/expiry_policy.py :language: python - :dedent: 8 + :dedent: 12 :lines: 72-75 .. 
literalinclude:: ../examples/expiry_policy.py :language: python :dedent: 12 - :lines: 78-86 + :lines: 81-89 Secondly, expiry policy can be set for all cache operations, which are done under decorator. To create it use :py:meth:`~pyignite.cache.BaseCache.with_expire_policy` @@ -76,7 +76,7 @@ Secondly, expiry policy can be set for all cache operations, which are done unde .. literalinclude:: ../examples/expiry_policy.py :language: python :dedent: 12 - :lines: 93-102 + :lines: 96-105 SQL --- diff --git a/docs/examples.rst b/docs/examples.rst index bd65a84..07ec65c 100644 --- a/docs/examples.rst +++ b/docs/examples.rst @@ -96,13 +96,13 @@ in cache settings dictionary on creation. .. literalinclude:: ../examples/expiry_policy.py :language: python - :dedent: 8 - :lines: 32-35 + :dedent: 12 + :lines: 31-34 .. literalinclude:: ../examples/expiry_policy.py :language: python :dedent: 12 - :lines: 38-44 + :lines: 40-46 Secondly, expiry policy can be set for all cache operations, which are done under decorator. To create it use :py:meth:`~pyignite.cache.BaseCache.with_expire_policy` @@ -110,7 +110,7 @@ Secondly, expiry policy can be set for all cache operations, which are done unde .. 
literalinclude:: ../examples/expiry_policy.py :language: python :dedent: 12 - :lines: 51-58 + :lines: 53-60 Scan ==== diff --git a/examples/expiry_policy.py b/examples/expiry_policy.py index 6dd3eb4..2002da1 100644 --- a/examples/expiry_policy.py +++ b/examples/expiry_policy.py @@ -18,6 +18,7 @@ from pyignite import Client, AioClient from pyignite.datatypes import ExpiryPolicy from pyignite.datatypes.prop_codes import PROP_NAME, PROP_EXPIRY_POLICY +from pyignite.exceptions import NotSupportedByClusterError def main(): @@ -25,14 +26,15 @@ def main(): client = Client() with client.connect('127.0.0.1', 10800): - if not client.protocol_context.is_expiry_policy_supported(): - print("'ExpiryPolicy' is not supported by cluster.") - print("Create cache with expiry policy.") - ttl_cache = client.create_cache({ - PROP_NAME: 'test', - PROP_EXPIRY_POLICY: ExpiryPolicy(create=1.0) - }) + try: + ttl_cache = client.create_cache({ + PROP_NAME: 'test', + PROP_EXPIRY_POLICY: ExpiryPolicy(create=1.0) + }) + except NotSupportedByClusterError: + print("'ExpiryPolicy' API is not supported by cluster. Finishing...") + return try: ttl_cache.put(1, 1) @@ -65,14 +67,15 @@ async def async_main(): client = AioClient() async with client.connect('127.0.0.1', 10800): - if not client.protocol_context.is_expiry_policy_supported(): - print("'ExpiryPolicy' is not supported by cluster.") - print("Create cache with expiry policy.") - ttl_cache = await client.create_cache({ - PROP_NAME: 'test', - PROP_EXPIRY_POLICY: ExpiryPolicy(create=1.0) - }) + try: + ttl_cache = await client.create_cache({ + PROP_NAME: 'test', + PROP_EXPIRY_POLICY: ExpiryPolicy(create=1.0) + }) + except NotSupportedByClusterError: + print("'ExpiryPolicy' API is not supported by cluster. Finishing...") + return try: await ttl_cache.put(1, 1)