diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index c3b180d..33e1e11 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -26,7 +26,7 @@ RUN \ USER vscode ENV PATH=/home/vscode/.local/bin:$PATH WORKDIR /home/vscode -COPY --chown=vscode:nogroup src/orcabridge/requirements.txt /tmp/requirements.txt +COPY --chown=vscode:nogroup src/orcapod/requirements.txt /tmp/requirements.txt RUN \ # python setup curl -LsSf https://astral.sh/uv/install.sh | sh && \ diff --git a/misc/demo_redis_mocking.py b/misc/demo_redis_mocking.py index cc18dcb..2fd1f92 100644 --- a/misc/demo_redis_mocking.py +++ b/misc/demo_redis_mocking.py @@ -72,10 +72,10 @@ def demonstrate_redis_mocking(): # Patch the Redis availability and exceptions with ( - patch("orcabridge.hashing.string_cachers.REDIS_AVAILABLE", True), - patch("orcabridge.hashing.string_cachers.redis.RedisError", MockRedisError), + patch("orcapod.hashing.string_cachers.REDIS_AVAILABLE", True), + patch("orcapod.hashing.string_cachers.redis.RedisError", MockRedisError), patch( - "orcabridge.hashing.string_cachers.redis.ConnectionError", + "orcapod.hashing.string_cachers.redis.ConnectionError", MockConnectionError, ), ): diff --git a/notebooks/01_orcapod_core_concepts copy.ipynb b/notebooks/old_tutorials/01_orcapod_core_concepts copy.ipynb similarity index 100% rename from notebooks/01_orcapod_core_concepts copy.ipynb rename to notebooks/old_tutorials/01_orcapod_core_concepts copy.ipynb diff --git a/notebooks/02_orcapod_basic_usage copy.ipynb b/notebooks/old_tutorials/02_orcapod_basic_usage copy.ipynb similarity index 100% rename from notebooks/02_orcapod_basic_usage copy.ipynb rename to notebooks/old_tutorials/02_orcapod_basic_usage copy.ipynb diff --git a/notebooks/02_orcapod_basic_usage.ipynb b/notebooks/old_tutorials/02_orcapod_basic_usage.ipynb similarity index 100% rename from notebooks/02_orcapod_basic_usage.ipynb rename to notebooks/old_tutorials/02_orcapod_basic_usage.ipynb diff --git a/notebooks/03_orcacapod_qol_features.ipynb b/notebooks/old_tutorials/03_orcacapod_qol_features.ipynb similarity index 100% rename from notebooks/03_orcacapod_qol_features.ipynb rename to notebooks/old_tutorials/03_orcacapod_qol_features.ipynb diff --git a/notebooks/04_orcapod_tracker.ipynb b/notebooks/old_tutorials/04_orcapod_tracker.ipynb similarity index 100% rename from notebooks/04_orcapod_tracker.ipynb rename to notebooks/old_tutorials/04_orcapod_tracker.ipynb diff --git a/notebooks/05_orcabridge_dj_integration.ipynb b/notebooks/old_tutorials/05_orcabridge_dj_integration.ipynb similarity index 100% rename from notebooks/05_orcabridge_dj_integration.ipynb rename to notebooks/old_tutorials/05_orcabridge_dj_integration.ipynb diff --git a/notebooks/tutorials/01_quick_dive_into_orcapod.ipynb b/notebooks/tutorials/01_quick_dive_into_orcapod.ipynb new file mode 100644 index 0000000..b09f745 --- /dev/null +++ b/notebooks/tutorials/01_quick_dive_into_orcapod.ipynb @@ -0,0 +1,876 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "id": "27cdd37d", + "metadata": {}, + "outputs": [], + "source": [ + "import orcapod as op" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "9cd4692c", + "metadata": {}, + "outputs": [], + "source": [ + "N = 10\n", + "stream = op.SyncStreamFromLists(\n", + " tags=[{\"id\": i} for i in range(N)],\n", + " packets=[{\"x\": i, \"y\": i + 1} for i in range(N)],\n", + " tag_typespec={\"id\": int},\n", + " packet_typespec={\"x\": int, \"y\": int},\n", + " label=\"MySource\",\n", + 
")\n", + "\n", + "word_stream = op.SyncStreamFromLists(\n", + " tags=[{\"id\": i} for i in range(N)],\n", + " packets=[{\"word1\": f\"hello {i}\", \"word2\": f\"world {i}\"} for i in range(N)],\n", + " tag_typespec={\"id\": int},\n", + " packet_typespec={\"word1\": str, \"word2\": str},\n", + " label=\"HelloWorld\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "78ab941b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'id': 0} {'x': 0, 'y': 1}\n", + "{'id': 1} {'x': 1, 'y': 2}\n", + "{'id': 2} {'x': 2, 'y': 3}\n", + "{'id': 3} {'x': 3, 'y': 4}\n", + "{'id': 4} {'x': 4, 'y': 5}\n", + "{'id': 5} {'x': 5, 'y': 6}\n", + "{'id': 6} {'x': 6, 'y': 7}\n", + "{'id': 7} {'x': 7, 'y': 8}\n", + "{'id': 8} {'x': 8, 'y': 9}\n", + "{'id': 9} {'x': 9, 'y': 10}\n" + ] + } + ], + "source": [ + "for tag, packet in stream:\n", + " print(tag, packet)" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "ef13511e", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'id': 0} {'word1': 'hello 0', 'word2': 'world 0'}\n", + "{'id': 1} {'word1': 'hello 1', 'word2': 'world 1'}\n", + "{'id': 2} {'word1': 'hello 2', 'word2': 'world 2'}\n", + "{'id': 3} {'word1': 'hello 3', 'word2': 'world 3'}\n", + "{'id': 4} {'word1': 'hello 4', 'word2': 'world 4'}\n", + "{'id': 5} {'word1': 'hello 5', 'word2': 'world 5'}\n", + "{'id': 6} {'word1': 'hello 6', 'word2': 'world 6'}\n", + "{'id': 7} {'word1': 'hello 7', 'word2': 'world 7'}\n", + "{'id': 8} {'word1': 'hello 8', 'word2': 'world 8'}\n", + "{'id': 9} {'word1': 'hello 9', 'word2': 'world 9'}\n" + ] + } + ], + "source": [ + "for tag, packet in word_stream:\n", + " print(tag, packet)" + ] + }, + { + "cell_type": "markdown", + "id": "ea7eb5ed", + "metadata": {}, + "source": [ + "## Defining function pods" + ] + }, + { + "cell_type": "markdown", + "id": "891bbadf", + "metadata": {}, + "source": [ + "Now we define our own function pods to perform simple computation. \n", + "Defining a function pod is quite simple, you simply \n", + "1. define a regular function with type annotations\n", + "2. decorate with `op.function_pod`, passing in the name ('key') for the output value(s)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "f8781072", + "metadata": {}, + "outputs": [], + "source": [ + "@op.function_pod(\"total\")\n", + "def total(x: int, y: int) -> int:\n", + " return x + y\n", + "\n", + "\n", + "@op.function_pod(\"delta\")\n", + "def delta(x: int, y: int) -> int:\n", + " return 2 * y - x\n", + "\n", + "\n", + "@op.function_pod(\"mult\")\n", + "def mult(x: int, y: int) -> int:\n", + " return x * y\n", + "\n", + "\n", + "@op.function_pod(\"concat_string\")\n", + "def concat(x: str, y: str) -> str:\n", + " return x + y\n" + ] + }, + { + "cell_type": "markdown", + "id": "bd843166", + "metadata": {}, + "source": [ + "Wrapped functions are now `FunctionPod` and expects to be called with streams as inputs. You can still access the original function through its `function` attribute." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "7b8f8056", + "metadata": {}, + "outputs": [ + { + "ename": "TypeError", + "evalue": "Expected SyncStream, got int for stream 5", + "output_type": "error", + "traceback": [ + "\u001b[31m---------------------------------------------------------------------------\u001b[39m", + "\u001b[31mTypeError\u001b[39m Traceback (most recent call last)", + "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[5]\u001b[39m\u001b[32m, line 2\u001b[39m\n\u001b[32m 1\u001b[39m \u001b[38;5;66;03m# this won't work, because it's expecting a stream as input\u001b[39;00m\n\u001b[32m----> \u001b[39m\u001b[32m2\u001b[39m \u001b[43mtotal\u001b[49m\u001b[43m(\u001b[49m\u001b[32;43m5\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[32;43m6\u001b[39;49m\u001b[43m)\u001b[49m\n", + "\u001b[36mFile \u001b[39m\u001b[32m~/workspace/orcapod-python/src/orcapod/core/base.py:60\u001b[39m, in \u001b[36mKernel.__call__\u001b[39m\u001b[34m(self, label, *streams, **kwargs)\u001b[39m\n\u001b[32m 58\u001b[39m \u001b[38;5;28;01mfor\u001b[39;00m stream \u001b[38;5;129;01min\u001b[39;00m streams:\n\u001b[32m 59\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(stream, SyncStream):\n\u001b[32m---> \u001b[39m\u001b[32m60\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mTypeError\u001b[39;00m(\n\u001b[32m 61\u001b[39m \u001b[33mf\u001b[39m\u001b[33m\"\u001b[39m\u001b[33mExpected SyncStream, got \u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mtype\u001b[39m(stream).\u001b[34m__name__\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m for stream \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mstream\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m\n\u001b[32m 62\u001b[39m )\n\u001b[32m 63\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(stream, Source):\n\u001b[32m 64\u001b[39m \u001b[38;5;66;03m# if the stream is a Source, instantiate it\u001b[39;00m\n\u001b[32m 65\u001b[39m stream = stream()\n", + "\u001b[31mTypeError\u001b[39m: Expected SyncStream, got int for stream 5" + ] + } + ], + "source": [ + "# this won't work, because it's expecting a stream as input\n", + "total(5, 6)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fba23537", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "11" + ] + }, + "execution_count": 23, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# but you can access original function this way\n", + "total.function(5, 6)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e56ffa7d", + "metadata": {}, + "outputs": [], + "source": [ + "# Passing a stream into a pod does NOT immediately trigger execution, but rather returns another stream\n", + "\n", + "total_stream = total(stream)" + ] + }, + { + "cell_type": "markdown", + "id": "0af7a165", + "metadata": {}, + "source": [ + "Iterating through the stream or calling `flow` triggers the computation" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "4c9017c9", + "metadata": {}, + "outputs": [ + { + "ename": "NameError", + "evalue": "name 'total_stream' is not defined", + "output_type": "error", + "traceback": [ + "\u001b[31m---------------------------------------------------------------------------\u001b[39m", + "\u001b[31mNameError\u001b[39m Traceback (most recent call last)", + "\u001b[36mCell\u001b[39m\u001b[36m 
\u001b[39m\u001b[32mIn[6]\u001b[39m\u001b[32m, line 1\u001b[39m\n\u001b[32m----> \u001b[39m\u001b[32m1\u001b[39m \u001b[38;5;28;01mfor\u001b[39;00m tag, packet \u001b[38;5;129;01min\u001b[39;00m \u001b[43mtotal_stream\u001b[49m:\n\u001b[32m 2\u001b[39m \u001b[38;5;28mprint\u001b[39m(tag, packet)\n", + "\u001b[31mNameError\u001b[39m: name 'total_stream' is not defined" + ] + } + ], + "source": [ + "for tag, packet in total_stream:\n", + " print(tag, packet)" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "59104716", + "metadata": {}, + "outputs": [ + { + "ename": "NameError", + "evalue": "name 'total_stream' is not defined", + "output_type": "error", + "traceback": [ + "\u001b[31m---------------------------------------------------------------------------\u001b[39m", + "\u001b[31mNameError\u001b[39m Traceback (most recent call last)", + "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[7]\u001b[39m\u001b[32m, line 1\u001b[39m\n\u001b[32m----> \u001b[39m\u001b[32m1\u001b[39m \u001b[43mtotal_stream\u001b[49m.flow()\n", + "\u001b[31mNameError\u001b[39m: name 'total_stream' is not defined" + ] + } + ], + "source": [ + "total_stream.flow()" + ] + }, + { + "cell_type": "markdown", + "id": "d1013dd1", + "metadata": {}, + "source": [ + "If you try to pass in an incompatible stream (stream whose packets don't match the expected inputs of the function), you will immediately get an error." + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "77547b4d", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Key 'word1' not found in parameter types.\n" + ] + }, + { + "ename": "TypeError", + "evalue": "Input packet types {'word1': , 'word2': } is not compatible with the function's expected input types {'x': , 'y': }", + "output_type": "error", + "traceback": [ + "\u001b[31m---------------------------------------------------------------------------\u001b[39m", + "\u001b[31mTypeError\u001b[39m Traceback (most recent call last)", + "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[8]\u001b[39m\u001b[32m, line 1\u001b[39m\n\u001b[32m----> \u001b[39m\u001b[32m1\u001b[39m total_stream = \u001b[43mtotal\u001b[49m\u001b[43m(\u001b[49m\u001b[43mword_stream\u001b[49m\u001b[43m)\u001b[49m\n", + "\u001b[36mFile \u001b[39m\u001b[32m~/workspace/orcapod-python/src/orcapod/core/base.py:75\u001b[39m, in \u001b[36mKernel.__call__\u001b[39m\u001b[34m(self, label, *streams, **kwargs)\u001b[39m\n\u001b[32m 69\u001b[39m normalized_streams = [\n\u001b[32m 70\u001b[39m stream() \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(stream, Source) \u001b[38;5;28;01melse\u001b[39;00m stream\n\u001b[32m 71\u001b[39m \u001b[38;5;28;01mfor\u001b[39;00m stream \u001b[38;5;129;01min\u001b[39;00m verified_streams\n\u001b[32m 72\u001b[39m ]\n\u001b[32m 74\u001b[39m pre_processed_streams = \u001b[38;5;28mself\u001b[39m.pre_forward_hook(*normalized_streams, **kwargs)\n\u001b[32m---> \u001b[39m\u001b[32m75\u001b[39m output_stream = \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mforward\u001b[49m\u001b[43m(\u001b[49m\u001b[43m*\u001b[49m\u001b[43mpre_processed_streams\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 76\u001b[39m post_processed_stream = \u001b[38;5;28mself\u001b[39m.post_forward_hook(output_stream, **kwargs)\n\u001b[32m 77\u001b[39m \u001b[38;5;66;03m# create an invocation 
instance\u001b[39;00m\n", + "\u001b[36mFile \u001b[39m\u001b[32m~/workspace/orcapod-python/src/orcapod/core/pod.py:236\u001b[39m, in \u001b[36mFunctionPod.forward\u001b[39m\u001b[34m(self, *streams, **kwargs)\u001b[39m\n\u001b[32m 232\u001b[39m _, packet_typespec = stream.types(trigger_run=\u001b[38;5;28;01mFalse\u001b[39;00m)\n\u001b[32m 233\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m packet_typespec \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m check_typespec_compatibility(\n\u001b[32m 234\u001b[39m packet_typespec, \u001b[38;5;28mself\u001b[39m.function_input_typespec\n\u001b[32m 235\u001b[39m ):\n\u001b[32m--> \u001b[39m\u001b[32m236\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mTypeError\u001b[39;00m(\n\u001b[32m 237\u001b[39m \u001b[33mf\u001b[39m\u001b[33m\"\u001b[39m\u001b[33mInput packet types \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mpacket_typespec\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m is not compatible with the function\u001b[39m\u001b[33m'\u001b[39m\u001b[33ms expected input types \u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mself\u001b[39m.function_input_typespec\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m\n\u001b[32m 238\u001b[39m )\n\u001b[32m 239\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28msuper\u001b[39m().forward(*streams, **kwargs)\n", + "\u001b[31mTypeError\u001b[39m: Input packet types {'word1': , 'word2': } is not compatible with the function's expected input types {'x': , 'y': }" + ] + } + ], + "source": [ + "total_stream = total(word_stream)" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "4c9c030a", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "({'id': int}, {'x': int, 'y': int})" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# you can check the tag and packet types of the stream\n", + "stream.types()" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "34338baf", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "({'id': int}, {'x': int, 'y': int})" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# you can check the tag and packet types of the stream\n", + "stream.types()" + ] + }, + { + "cell_type": "markdown", + "id": "3ba299b2", + "metadata": {}, + "source": [ + "## Defining pipeline" + ] + }, + { + "cell_type": "markdown", + "id": "1e1dd036", + "metadata": {}, + "source": [ + "We will now piece together multiple function pods into a pipeline. We do this by instantiating a `Pipeline` object. We will store the results into a simple data store." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "8083f54a", + "metadata": {}, + "outputs": [], + "source": [ + "# Use simple data store, saving data to Parquet files\n", + "from orcapod.stores.delta_table_arrow_data_store import DeltaTableArrowDataStore\n", + "\n", + "pipeline_store = DeltaTableArrowDataStore(\"./delta_store\", batch_size=100)" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "a475308c", + "metadata": {}, + "outputs": [], + "source": [ + "pipeline = op.Pipeline(\"test_pipeline\", pipeline_store)\n" + ] + }, + { + "cell_type": "markdown", + "id": "a42158b9", + "metadata": {}, + "source": [ + "Now we have a pipeline object, we can use it to define our pipeline by simply \"chaining\" together function pod calls." + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "f923ecf1", + "metadata": {}, + "outputs": [], + "source": [ + "with pipeline:\n", + " total_stream = total(stream)\n", + " delta_stream = delta(stream)\n", + " mult_stream = mult(\n", + " total_stream.map({\"total\": \"x\"}), delta_stream.map({\"delta\": \"y\"})\n", + " )" + ] + }, + { + "cell_type": "markdown", + "id": "b67e9413", + "metadata": {}, + "source": [ + "And that's it! Now the elements of the pipeline is available as properties on the pipeline." + ] + }, + { + "cell_type": "markdown", + "id": "7ee41a20", + "metadata": {}, + "source": [ + "By default, the function pods are made available under the function's name in the pipeline." + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "64746ada", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Flushing triggered!!\n", + "Flushing triggered!!\n", + "Flushing triggered!!\n", + "Flushing triggered!!\n", + "Flushing triggered!!\n", + "Flushing triggered!!\n", + "Flushing triggered!!\n", + "Flushing triggered!!\n", + "Flushing triggered!!\n", + "Flushing triggered!!\n", + "Flushing triggered!!\n", + "Flushing triggered!!\n", + "Flushing triggered!!\n", + "Flushing triggered!!\n", + "Flushing triggered!!\n", + "Flushing triggered!!\n", + "Flushing triggered!!\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Error processing packet {'x': 8, 'y': 9}: Memoizing single packet return 2 packets!\n" + ] + }, + { + "ename": "AssertionError", + "evalue": "Memoizing single packet return 2 packets!", + "output_type": "error", + "traceback": [ + "\u001b[31m---------------------------------------------------------------------------\u001b[39m", + "\u001b[31mAssertionError\u001b[39m Traceback (most recent call last)", + "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[14]\u001b[39m\u001b[32m, line 1\u001b[39m\n\u001b[32m----> \u001b[39m\u001b[32m1\u001b[39m \u001b[43mpipeline\u001b[49m\u001b[43m.\u001b[49m\u001b[43mrun\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n", + "\u001b[36mFile \u001b[39m\u001b[32m~/workspace/orcapod-python/src/orcapod/pipeline/pipeline.py:217\u001b[39m, in \u001b[36mPipeline.run\u001b[39m\u001b[34m(self, full_sync)\u001b[39m\n\u001b[32m 215\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m full_sync:\n\u001b[32m 216\u001b[39m node.reset_cache()\n\u001b[32m--> \u001b[39m\u001b[32m217\u001b[39m \u001b[43mnode\u001b[49m\u001b[43m.\u001b[49m\u001b[43mflow\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 219\u001b[39m \u001b[38;5;28mself\u001b[39m.flush()\n", + "\u001b[36mFile \u001b[39m\u001b[32m~/workspace/orcapod-python/src/orcapod/core/base.py:336\u001b[39m, in 
\u001b[36mStream.flow\u001b[39m\u001b[34m(self)\u001b[39m\n\u001b[32m 331\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34mflow\u001b[39m(\u001b[38;5;28mself\u001b[39m) -> Collection[\u001b[38;5;28mtuple\u001b[39m[Tag, Packet]]:\n\u001b[32m 332\u001b[39m \u001b[38;5;250m \u001b[39m\u001b[33;03m\"\"\"\u001b[39;00m\n\u001b[32m 333\u001b[39m \u001b[33;03m Flow everything through the stream, returning the entire collection of\u001b[39;00m\n\u001b[32m 334\u001b[39m \u001b[33;03m (Tag, Packet) as a collection. This will tigger any upstream computation of the stream.\u001b[39;00m\n\u001b[32m 335\u001b[39m \u001b[33;03m \"\"\"\u001b[39;00m\n\u001b[32m--> \u001b[39m\u001b[32m336\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43m[\u001b[49m\u001b[43me\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mfor\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43me\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;129;43;01min\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m]\u001b[49m\n", + "\u001b[36mFile \u001b[39m\u001b[32m~/workspace/orcapod-python/src/orcapod/core/base.py:590\u001b[39m, in \u001b[36mSource.__iter__\u001b[39m\u001b[34m(self)\u001b[39m\n\u001b[32m 586\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34m__iter__\u001b[39m(\u001b[38;5;28mself\u001b[39m) -> Iterator[\u001b[38;5;28mtuple\u001b[39m[Tag, Packet]]:\n\u001b[32m 587\u001b[39m \u001b[38;5;250m \u001b[39m\u001b[33;03m\"\"\"\u001b[39;00m\n\u001b[32m 588\u001b[39m \u001b[33;03m Simple iter method that allows for Source object to act as a stream.\u001b[39;00m\n\u001b[32m 589\u001b[39m \u001b[33;03m \"\"\"\u001b[39;00m\n\u001b[32m--> \u001b[39m\u001b[32m590\u001b[39m \u001b[38;5;28;01myield from\u001b[39;00m \u001b[38;5;28mself\u001b[39m()\n", + "\u001b[36mFile \u001b[39m\u001b[32m~/workspace/orcapod-python/src/orcapod/core/streams.py:99\u001b[39m, in \u001b[36mSyncStreamFromGenerator.__iter__\u001b[39m\u001b[34m(self)\u001b[39m\n\u001b[32m 97\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34m__iter__\u001b[39m(\u001b[38;5;28mself\u001b[39m) -> Iterator[\u001b[38;5;28mtuple\u001b[39m[Tag, Packet]]:\n\u001b[32m 98\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28mself\u001b[39m.check_consistency:\n\u001b[32m---> \u001b[39m\u001b[32m99\u001b[39m \u001b[38;5;28;01myield from\u001b[39;00m \u001b[38;5;28mself\u001b[39m.generator_factory()\n", + "\u001b[36mFile \u001b[39m\u001b[32m~/workspace/orcapod-python/src/orcapod/core/pod.py:107\u001b[39m, in \u001b[36mPod.forward..generator\u001b[39m\u001b[34m()\u001b[39m\n\u001b[32m 105\u001b[39m logger.error(\u001b[33mf\u001b[39m\u001b[33m\"\u001b[39m\u001b[33mError processing packet \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mpacket\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00me\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m)\n\u001b[32m 106\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m.error_handling == \u001b[33m\"\u001b[39m\u001b[33mraise\u001b[39m\u001b[33m\"\u001b[39m:\n\u001b[32m--> \u001b[39m\u001b[32m107\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m e\n\u001b[32m 108\u001b[39m \u001b[38;5;28;01melif\u001b[39;00m \u001b[38;5;28mself\u001b[39m.error_handling == \u001b[33m\"\u001b[39m\u001b[33mwarn\u001b[39m\u001b[33m\"\u001b[39m:\n\u001b[32m 109\u001b[39m warnings.warn(\u001b[33mf\u001b[39m\u001b[33m\"\u001b[39m\u001b[33mError processing packet 
\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mpacket\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00me\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m)\n", + "\u001b[36mFile \u001b[39m\u001b[32m~/workspace/orcapod-python/src/orcapod/core/pod.py:94\u001b[39m, in \u001b[36mPod.forward..generator\u001b[39m\u001b[34m()\u001b[39m\n\u001b[32m 92\u001b[39m \u001b[38;5;28;01mfor\u001b[39;00m tag, packet \u001b[38;5;129;01min\u001b[39;00m stream:\n\u001b[32m 93\u001b[39m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[32m---> \u001b[39m\u001b[32m94\u001b[39m tag, output_packet = \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mcall\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtag\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mpacket\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 95\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m output_packet \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[32m 96\u001b[39m logger.debug(\n\u001b[32m 97\u001b[39m \u001b[33mf\u001b[39m\u001b[33m\"\u001b[39m\u001b[33mCall returned None as output for tag \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mtag\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m. Skipping...\u001b[39m\u001b[33m\"\u001b[39m\n\u001b[32m 98\u001b[39m )\n", + "\u001b[36mFile \u001b[39m\u001b[32m~/workspace/orcapod-python/src/orcapod/pipeline/nodes.py:629\u001b[39m, in \u001b[36mCachedFunctionPodWrapper.call\u001b[39m\u001b[34m(self, tag, packet)\u001b[39m\n\u001b[32m 627\u001b[39m output_packet = \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[32m 628\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28mself\u001b[39m.skip_memoization_lookup:\n\u001b[32m--> \u001b[39m\u001b[32m629\u001b[39m output_packet = \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_retrieve_memoized_with_packet_key\u001b[49m\u001b[43m(\u001b[49m\u001b[43mpacket_key\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 630\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m output_packet \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[32m 631\u001b[39m logger.debug(\n\u001b[32m 632\u001b[39m \u001b[33mf\u001b[39m\u001b[33m\"\u001b[39m\u001b[33mMemoized output for \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mpacket\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m with \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mpacket_key\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m found, skipping computation\u001b[39m\u001b[33m\"\u001b[39m\n\u001b[32m 633\u001b[39m )\n", + "\u001b[36mFile \u001b[39m\u001b[32m~/workspace/orcapod-python/src/orcapod/pipeline/nodes.py:573\u001b[39m, in \u001b[36mCachedFunctionPodWrapper._retrieve_memoized_with_packet_key\u001b[39m\u001b[34m(self, packet_key)\u001b[39m\n\u001b[32m 571\u001b[39m packets = \u001b[38;5;28mself\u001b[39m.output_converter.from_arrow_table_to_python_packets(arrow_table)\n\u001b[32m 572\u001b[39m \u001b[38;5;66;03m# since memoizing single packet, it should only contain one packet\u001b[39;00m\n\u001b[32m--> \u001b[39m\u001b[32m573\u001b[39m \u001b[38;5;28;01massert\u001b[39;00m \u001b[38;5;28mlen\u001b[39m(packets) == \u001b[32m1\u001b[39m, (\n\u001b[32m 574\u001b[39m \u001b[33mf\u001b[39m\u001b[33m\"\u001b[39m\u001b[33mMemoizing single packet return \u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mlen\u001b[39m(packets)\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m packets!\u001b[39m\u001b[33m\"\u001b[39m\n\u001b[32m 575\u001b[39m )\n\u001b[32m 576\u001b[39m 
\u001b[38;5;28;01mreturn\u001b[39;00m packets[\u001b[32m0\u001b[39m]\n", + "\u001b[31mAssertionError\u001b[39m: Memoizing single packet return 2 packets!" + ] + } + ], + "source": [ + "pipeline.run()" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "66230603", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "FunctionPodNode>" + ] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "pipeline.total" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "6587f2f2", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "FunctionPodNode>" + ] + }, + "execution_count": 18, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "pipeline.mult" + ] + }, + { + "cell_type": "markdown", + "id": "16d0dba3", + "metadata": {}, + "source": [ + "Other implicitly created nodes such as joining of two streams are made available under the corresponding operator class (e.g. `Join`)" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "id": "bd0dfba2", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "KernelNode" + ] + }, + "execution_count": 19, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "pipeline.Join" + ] + }, + { + "cell_type": "markdown", + "id": "71dba5c5", + "metadata": {}, + "source": [ + "You can list out all nodes through `nodes` property" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "id": "e22758ab", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'MySource': KernelNode,\n", + " 'total': FunctionPodNode>,\n", + " 'delta': FunctionPodNode>,\n", + " 'MapPackets_0': KernelNode,\n", + " 'MapPackets_1': KernelNode,\n", + " 'Join': KernelNode,\n", + " 'mult': FunctionPodNode>}" + ] + }, + "execution_count": 20, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "pipeline.nodes" + ] + }, + { + "cell_type": "markdown", + "id": "039b617f", + "metadata": {}, + "source": [ + "You can easily rename any node using the pipeline's `rename` method" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "id": "0d1a470e", + "metadata": {}, + "outputs": [], + "source": [ + "pipeline.rename(\"MapPackets_0\", \"total_map\")\n", + "pipeline.rename(\"MapPackets_1\", \"mult_map\")" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "id": "3a43984d", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'MySource': KernelNode,\n", + " 'total': FunctionPodNode>,\n", + " 'delta': FunctionPodNode>,\n", + " 'Join': KernelNode,\n", + " 'mult': FunctionPodNode>,\n", + " 'total_map': KernelNode,\n", + " 'mult_map': KernelNode}" + ] + }, + "execution_count": 22, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "pipeline.nodes" + ] + }, + { + "cell_type": "markdown", + "id": "c438f111", + "metadata": {}, + "source": [ + "Renaming does NOT change the structure of the pipeline in anyway -- it simply changes how it's labeld for your convenience." + ] + }, + { + "cell_type": "markdown", + "id": "befa6107", + "metadata": {}, + "source": [ + "### Running pipeline and accessing results" + ] + }, + { + "cell_type": "markdown", + "id": "4d4412b1", + "metadata": {}, + "source": [ + "Since we just created the pipeline, there are no results associated with any node. You can get [Polars](https://pola.rs) DataFrame viewing into the results through the node's `df` attribute." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 23, + "id": "96106e09", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Flushing triggered!!\n" + ] + } + ], + "source": [ + "pipeline.total.df" + ] + }, + { + "cell_type": "markdown", + "id": "62b7e59a", + "metadata": {}, + "source": [ + "Before we run, the source nodes is also not \"recorded\" and thus will appear empty." + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "id": "33b449b6", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Flushing triggered!!\n" + ] + } + ], + "source": [ + "pipeline.MySource.df" + ] + }, + { + "cell_type": "markdown", + "id": "408e8012", + "metadata": {}, + "source": [ + "We can trigger the entire pipeline to run and record all results by simply calling the `run` method." + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "189f943f", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Flushing triggered!!\n", + "Flushing triggered!!\n", + "Flushing triggered!!\n", + "Flushing triggered!!\n", + "Flushing triggered!!\n", + "Flushing triggered!!\n", + "Flushing triggered!!\n", + "Flushing triggered!!\n", + "Flushing triggered!!\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Error processing packet {'x': 8, 'y': 9}: Memoizing single packet return 2 packets!\n" + ] + }, + { + "ename": "AssertionError", + "evalue": "Memoizing single packet return 2 packets!", + "output_type": "error", + "traceback": [ + "\u001b[31m---------------------------------------------------------------------------\u001b[39m", + "\u001b[31mAssertionError\u001b[39m Traceback (most recent call last)", + "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[18]\u001b[39m\u001b[32m, line 1\u001b[39m\n\u001b[32m----> \u001b[39m\u001b[32m1\u001b[39m \u001b[43mpipeline\u001b[49m\u001b[43m.\u001b[49m\u001b[43mrun\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n", + "\u001b[36mFile \u001b[39m\u001b[32m~/workspace/orcapod-python/src/orcapod/pipeline/pipeline.py:217\u001b[39m, in \u001b[36mPipeline.run\u001b[39m\u001b[34m(self, full_sync)\u001b[39m\n\u001b[32m 215\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m full_sync:\n\u001b[32m 216\u001b[39m node.reset_cache()\n\u001b[32m--> \u001b[39m\u001b[32m217\u001b[39m \u001b[43mnode\u001b[49m\u001b[43m.\u001b[49m\u001b[43mflow\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 219\u001b[39m \u001b[38;5;28mself\u001b[39m.flush()\n", + "\u001b[36mFile \u001b[39m\u001b[32m~/workspace/orcapod-python/src/orcapod/core/base.py:336\u001b[39m, in \u001b[36mStream.flow\u001b[39m\u001b[34m(self)\u001b[39m\n\u001b[32m 331\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34mflow\u001b[39m(\u001b[38;5;28mself\u001b[39m) -> Collection[\u001b[38;5;28mtuple\u001b[39m[Tag, Packet]]:\n\u001b[32m 332\u001b[39m \u001b[38;5;250m \u001b[39m\u001b[33;03m\"\"\"\u001b[39;00m\n\u001b[32m 333\u001b[39m \u001b[33;03m Flow everything through the stream, returning the entire collection of\u001b[39;00m\n\u001b[32m 334\u001b[39m \u001b[33;03m (Tag, Packet) as a collection. 
This will tigger any upstream computation of the stream.\u001b[39;00m\n\u001b[32m 335\u001b[39m \u001b[33;03m \"\"\"\u001b[39;00m\n\u001b[32m--> \u001b[39m\u001b[32m336\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43m[\u001b[49m\u001b[43me\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mfor\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43me\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;129;43;01min\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m]\u001b[49m\n", + "\u001b[36mFile \u001b[39m\u001b[32m~/workspace/orcapod-python/src/orcapod/core/base.py:590\u001b[39m, in \u001b[36mSource.__iter__\u001b[39m\u001b[34m(self)\u001b[39m\n\u001b[32m 586\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34m__iter__\u001b[39m(\u001b[38;5;28mself\u001b[39m) -> Iterator[\u001b[38;5;28mtuple\u001b[39m[Tag, Packet]]:\n\u001b[32m 587\u001b[39m \u001b[38;5;250m \u001b[39m\u001b[33;03m\"\"\"\u001b[39;00m\n\u001b[32m 588\u001b[39m \u001b[33;03m Simple iter method that allows for Source object to act as a stream.\u001b[39;00m\n\u001b[32m 589\u001b[39m \u001b[33;03m \"\"\"\u001b[39;00m\n\u001b[32m--> \u001b[39m\u001b[32m590\u001b[39m \u001b[38;5;28;01myield from\u001b[39;00m \u001b[38;5;28mself\u001b[39m()\n", + "\u001b[36mFile \u001b[39m\u001b[32m~/workspace/orcapod-python/src/orcapod/core/streams.py:99\u001b[39m, in \u001b[36mSyncStreamFromGenerator.__iter__\u001b[39m\u001b[34m(self)\u001b[39m\n\u001b[32m 97\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34m__iter__\u001b[39m(\u001b[38;5;28mself\u001b[39m) -> Iterator[\u001b[38;5;28mtuple\u001b[39m[Tag, Packet]]:\n\u001b[32m 98\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28mself\u001b[39m.check_consistency:\n\u001b[32m---> \u001b[39m\u001b[32m99\u001b[39m \u001b[38;5;28;01myield from\u001b[39;00m \u001b[38;5;28mself\u001b[39m.generator_factory()\n", + "\u001b[36mFile \u001b[39m\u001b[32m~/workspace/orcapod-python/src/orcapod/core/pod.py:107\u001b[39m, in \u001b[36mPod.forward..generator\u001b[39m\u001b[34m()\u001b[39m\n\u001b[32m 105\u001b[39m logger.error(\u001b[33mf\u001b[39m\u001b[33m\"\u001b[39m\u001b[33mError processing packet \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mpacket\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00me\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m)\n\u001b[32m 106\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m.error_handling == \u001b[33m\"\u001b[39m\u001b[33mraise\u001b[39m\u001b[33m\"\u001b[39m:\n\u001b[32m--> \u001b[39m\u001b[32m107\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m e\n\u001b[32m 108\u001b[39m \u001b[38;5;28;01melif\u001b[39;00m \u001b[38;5;28mself\u001b[39m.error_handling == \u001b[33m\"\u001b[39m\u001b[33mwarn\u001b[39m\u001b[33m\"\u001b[39m:\n\u001b[32m 109\u001b[39m warnings.warn(\u001b[33mf\u001b[39m\u001b[33m\"\u001b[39m\u001b[33mError processing packet \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mpacket\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00me\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m)\n", + "\u001b[36mFile \u001b[39m\u001b[32m~/workspace/orcapod-python/src/orcapod/core/pod.py:94\u001b[39m, in \u001b[36mPod.forward..generator\u001b[39m\u001b[34m()\u001b[39m\n\u001b[32m 92\u001b[39m \u001b[38;5;28;01mfor\u001b[39;00m tag, packet \u001b[38;5;129;01min\u001b[39;00m stream:\n\u001b[32m 93\u001b[39m 
\u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[32m---> \u001b[39m\u001b[32m94\u001b[39m tag, output_packet = \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mcall\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtag\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mpacket\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 95\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m output_packet \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[32m 96\u001b[39m logger.debug(\n\u001b[32m 97\u001b[39m \u001b[33mf\u001b[39m\u001b[33m\"\u001b[39m\u001b[33mCall returned None as output for tag \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mtag\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m. Skipping...\u001b[39m\u001b[33m\"\u001b[39m\n\u001b[32m 98\u001b[39m )\n", + "\u001b[36mFile \u001b[39m\u001b[32m~/workspace/orcapod-python/src/orcapod/pipeline/nodes.py:629\u001b[39m, in \u001b[36mCachedFunctionPodWrapper.call\u001b[39m\u001b[34m(self, tag, packet)\u001b[39m\n\u001b[32m 627\u001b[39m output_packet = \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[32m 628\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28mself\u001b[39m.skip_memoization_lookup:\n\u001b[32m--> \u001b[39m\u001b[32m629\u001b[39m output_packet = \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_retrieve_memoized_with_packet_key\u001b[49m\u001b[43m(\u001b[49m\u001b[43mpacket_key\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 630\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m output_packet \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[32m 631\u001b[39m logger.debug(\n\u001b[32m 632\u001b[39m \u001b[33mf\u001b[39m\u001b[33m\"\u001b[39m\u001b[33mMemoized output for \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mpacket\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m with \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mpacket_key\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m found, skipping computation\u001b[39m\u001b[33m\"\u001b[39m\n\u001b[32m 633\u001b[39m )\n", + "\u001b[36mFile \u001b[39m\u001b[32m~/workspace/orcapod-python/src/orcapod/pipeline/nodes.py:573\u001b[39m, in \u001b[36mCachedFunctionPodWrapper._retrieve_memoized_with_packet_key\u001b[39m\u001b[34m(self, packet_key)\u001b[39m\n\u001b[32m 571\u001b[39m packets = \u001b[38;5;28mself\u001b[39m.output_converter.from_arrow_table_to_python_packets(arrow_table)\n\u001b[32m 572\u001b[39m \u001b[38;5;66;03m# since memoizing single packet, it should only contain one packet\u001b[39;00m\n\u001b[32m--> \u001b[39m\u001b[32m573\u001b[39m \u001b[38;5;28;01massert\u001b[39;00m \u001b[38;5;28mlen\u001b[39m(packets) == \u001b[32m1\u001b[39m, (\n\u001b[32m 574\u001b[39m \u001b[33mf\u001b[39m\u001b[33m\"\u001b[39m\u001b[33mMemoizing single packet return \u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mlen\u001b[39m(packets)\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m packets!\u001b[39m\u001b[33m\"\u001b[39m\n\u001b[32m 575\u001b[39m )\n\u001b[32m 576\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m packets[\u001b[32m0\u001b[39m]\n", + "\u001b[31mAssertionError\u001b[39m: Memoizing single packet return 2 packets!" + ] + } + ], + "source": [ + "pipeline.run()" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "id": "1674bec4", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "shape: (10, 3)
idxy
i64i64i64
001
112
223
334
445
556
667
778
889
9910
" + ], + "text/plain": [ + "shape: (10, 3)\n", + "┌─────┬─────┬─────┐\n", + "│ id ┆ x ┆ y │\n", + "│ --- ┆ --- ┆ --- │\n", + "│ i64 ┆ i64 ┆ i64 │\n", + "╞═════╪═════╪═════╡\n", + "│ 0 ┆ 0 ┆ 1 │\n", + "│ 1 ┆ 1 ┆ 2 │\n", + "│ 2 ┆ 2 ┆ 3 │\n", + "│ 3 ┆ 3 ┆ 4 │\n", + "│ 4 ┆ 4 ┆ 5 │\n", + "│ 5 ┆ 5 ┆ 6 │\n", + "│ 6 ┆ 6 ┆ 7 │\n", + "│ 7 ┆ 7 ┆ 8 │\n", + "│ 8 ┆ 8 ┆ 9 │\n", + "│ 9 ┆ 9 ┆ 10 │\n", + "└─────┴─────┴─────┘" + ] + }, + "execution_count": 24, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "pipeline.MySource.df" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "id": "2b69d213", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "shape: (10, 2)
idtotal
i64i64
01
13
25
37
49
511
613
715
817
919
" + ], + "text/plain": [ + "shape: (10, 2)\n", + "┌─────┬───────┐\n", + "│ id ┆ total │\n", + "│ --- ┆ --- │\n", + "│ i64 ┆ i64 │\n", + "╞═════╪═══════╡\n", + "│ 0 ┆ 1 │\n", + "│ 1 ┆ 3 │\n", + "│ 2 ┆ 5 │\n", + "│ 3 ┆ 7 │\n", + "│ 4 ┆ 9 │\n", + "│ 5 ┆ 11 │\n", + "│ 6 ┆ 13 │\n", + "│ 7 ┆ 15 │\n", + "│ 8 ┆ 17 │\n", + "│ 9 ┆ 19 │\n", + "└─────┴───────┘" + ] + }, + "execution_count": 25, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "pipeline.total.df" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "orcapod (3.13.3)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.13.3" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/pyproject.toml b/pyproject.toml index ca1c20c..aa23332 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -14,7 +14,7 @@ dependencies = [ "pandas>=2.2.3", "pyyaml>=6.0.2", "pyarrow>=20.0.0", - "polars>=1.30.0", + "polars>=1.31.0", "beartype>=0.21.0", ] readme = "README.md" diff --git a/src/orcapod/__init__.py b/src/orcapod/__init__.py index a84492a..b49b19c 100644 --- a/src/orcapod/__init__.py +++ b/src/orcapod/__init__.py @@ -1,34 +1,17 @@ -from .core import operators, sources, streams -from .core.streams import SyncStreamFromLists, SyncStreamFromGenerator -from . import hashing, pod, store -from .core.operators import Join, MapPackets, MapTags, packet, tag -from .pod import FunctionPod, function_pod -from .core.sources import GlobSource -from .store import DirDataStore, SafeDirDataStore -from .core.tracker import GraphTracker +from .data import DEFAULT_TRACKER_MANAGER +from .data.pods import function_pod, FunctionPod, CachedPod +from .data import streams +from .stores.delta_lake_stores import BasicDeltaTableArrowStore -DEFAULT_TRACKER = GraphTracker() -DEFAULT_TRACKER.activate() +no_tracking = DEFAULT_TRACKER_MANAGER.no_tracking __all__ = [ - "hashing", - "store", - "pod", - "operators", - "streams", - "sources", - "MapTags", - "MapPackets", - "Join", - "tag", - "packet", - "FunctionPod", + "DEFAULT_TRACKER_MANAGER", + "no_tracking", "function_pod", - "GlobSource", - "DirDataStore", - "SafeDirDataStore", - "DEFAULT_TRACKER", - "SyncStreamFromLists", - "SyncStreamFromGenerator", + "FunctionPod", + "CachedPod", + "streams", + "BasicDeltaTableArrowStore", ] diff --git a/src/orcapod/core/__init__.py b/src/orcapod/core/__init__.py index e69de29..d236681 100644 --- a/src/orcapod/core/__init__.py +++ b/src/orcapod/core/__init__.py @@ -0,0 +1,13 @@ +from .base import Kernel, Invocation, Stream, SyncStream, Source +from .operators import Operator +from .pod import Pod + +__all__ = [ + "Kernel", + "Operator", + "Invocation", + "Stream", + "SyncStream", + "Source", + "Pod", +] diff --git a/src/orcapod/core/base.py b/src/orcapod/core/base.py index 0b1ed63..367bc72 100644 --- a/src/orcapod/core/base.py +++ b/src/orcapod/core/base.py @@ -1,21 +1,19 @@ -# Collection of base classes for operations and streams in the orcabridge framework. +# Collection of base classes for operations and streams in the orcapod framework. 
import threading from abc import ABC, abstractmethod from collections.abc import Callable, Collection, Iterator -from typing import Any, TypeVar, Hashable +from typing import Any - -from orcapod.hashing import HashableMixin +from orcapod.hashing import ContentIdentifiableBase from orcapod.types import Packet, Tag, TypeSpec -from orcapod.utils.stream_utils import get_typespec - +from orcapod.types.typespec_utils import get_typespec_from_dict import logging logger = logging.getLogger(__name__) -class Kernel(ABC, HashableMixin): +class Kernel(ABC, ContentIdentifiableBase): """ Kernel defines the fundamental unit of computation that can be performed on zero, one or more streams of data. It is the base class for all computations and transformations that can be performed on a collection of streams @@ -27,42 +25,67 @@ class Kernel(ABC, HashableMixin): for computational graph tracking. """ - def __init__(self, label: str | None = None, **kwargs) -> None: + def __init__( + self, label: str | None = None, skip_tracking: bool = False, **kwargs + ) -> None: super().__init__(**kwargs) self._label = label + self._skip_tracking = skip_tracking - @property - def label(self) -> str: + def pre_forward_hook( + self, *streams: "SyncStream", **kwargs + ) -> tuple["SyncStream", ...]: """ - Returns a human-readable label for this kernel. - Default implementation returns the provided label or class name if no label was provided. + A hook that is called before the forward method is invoked. + This can be used to perform any pre-processing or validation on the input streams. + Subclasses can override this method to provide custom behavior. """ - if self._label: - return self._label - return self.__class__.__name__ + return streams - @label.setter - def label(self, label: str) -> None: - self._label = label + def post_forward_hook(self, output_stream: "SyncStream", **kwargs) -> "SyncStream": + """ + A hook that is called after the forward method is invoked. + This can be used to perform any post-processing on the output stream. + Subclasses can override this method to provide custom behavior. 
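+
+        Example (illustrative sketch; `LoggingKernel` is a hypothetical subclass,
+        not part of the library)::
+
+            class LoggingKernel(Kernel):
+                def post_forward_hook(self, output_stream, **kwargs):
+                    # log the concrete stream type before handing it back
+                    logger.debug("forward produced %s", type(output_stream).__name__)
+                    return output_stream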
+ """ + return output_stream + + def __call__( + self, *streams: "SyncStream", label: str | None = None, **kwargs + ) -> "SyncStream": + # check that inputs are stream instances and if it's source, instantiate it + verified_streams = [] + for stream in streams: + if not isinstance(stream, SyncStream): + raise TypeError( + f"Expected SyncStream, got {type(stream).__name__} for stream {stream}" + ) + if isinstance(stream, Source): + # if the stream is a Source, instantiate it + stream = stream() + verified_streams.append(stream) - def __call__(self, *streams: "SyncStream", **kwargs) -> "SyncStream": # Special handling of Source: trigger call on source if passed as stream normalized_streams = [ - stream() if isinstance(stream, Source) else stream for stream in streams + stream() if isinstance(stream, Source) else stream + for stream in verified_streams ] - output_stream = self.forward(*normalized_streams, **kwargs) + pre_processed_streams = self.pre_forward_hook(*normalized_streams, **kwargs) + output_stream = self.forward(*pre_processed_streams, **kwargs) + post_processed_stream = self.post_forward_hook(output_stream, **kwargs) # create an invocation instance - invocation = Invocation(self, normalized_streams) + invocation = Invocation(self, pre_processed_streams, label=label) # label the output_stream with the invocation that produced the stream - output_stream.invocation = invocation + post_processed_stream.invocation = invocation - # register the invocation to all active trackers - active_trackers = Tracker.get_active_trackers() - for tracker in active_trackers: - tracker.record(invocation) + if not self._skip_tracking: + # register the invocation to all active trackers + active_trackers = Tracker.get_active_trackers() + for tracker in active_trackers: + tracker.record(invocation) - return output_stream + return post_processed_stream @abstractmethod def forward(self, *streams: "SyncStream") -> "SyncStream": @@ -98,7 +121,7 @@ def identity_structure(self, *streams: "SyncStream") -> Any: logger.warning( f"Identity structure not implemented for {self.__class__.__name__}" ) - return (self.__class__.__name__,) + tuple(streams) + return (self.__class__.__name__,) + streams def keys( self, *streams: "SyncStream", trigger_run: bool = False @@ -134,7 +157,7 @@ def types( return None, None tag, packet = next(iter(self(*streams))) - return get_typespec(tag), get_typespec(packet) + return get_typespec_from_dict(tag), get_typespec_from_dict(packet) def claims_unique_tags( self, *streams: "SyncStream", trigger_run: bool = False @@ -204,7 +227,7 @@ def record(self, invocation: "Invocation") -> None: ... # This is NOT an abstract class, but rather a concrete class that # represents an invocation of a kernel on a collection of streams. -class Invocation(HashableMixin): +class Invocation(ContentIdentifiableBase): """ This class represents an invocation of a kernel on a collection of streams. It contains the kernel and the streams that were used in the invocation. @@ -217,12 +240,18 @@ def __init__( kernel: Kernel, # TODO: technically this should be Stream to stay consistent with Stream interface. Update to Stream when AsyncStream is implemented streams: Collection["SyncStream"], + **kwargs, ) -> None: + super().__init__(**kwargs) self.kernel = kernel self.streams = streams - def __hash__(self) -> int: - return super().__hash__() + def computed_label(self) -> str | None: + """ + Returns the computed label for this invocation. + This is used to provide a default label if no label is set. 
+ """ + return self.kernel.label def __repr__(self) -> str: return f"Invocation(kernel={self.kernel}, streams={self.streams})" @@ -230,11 +259,6 @@ def __repr__(self) -> str: def __str__(self) -> str: return f"Invocation[ID:{self.__hash__()}]({self.kernel}, {self.streams})" - def __eq__(self, other: Any) -> bool: - if not isinstance(other, Invocation): - return False - return hash(self) == hash(other) - def __lt__(self, other: Any) -> bool: if not isinstance(other, Invocation): return NotImplemented @@ -271,7 +295,7 @@ def identity_structure(self) -> int: return self.kernel.identity_structure(*self.streams) -class Stream(ABC, HashableMixin): +class Stream(ABC, ContentIdentifiableBase): """ A stream is a collection of tagged-packets that are generated by an operation. The stream is iterable and can be used to access the packets in the stream. @@ -280,35 +304,15 @@ class Stream(ABC, HashableMixin): This may be None if the stream is not generated by a kernel (i.e. directly instantiated by a user). """ - def __init__(self, label: str | None = None, **kwargs) -> None: + def __init__(self, **kwargs) -> None: super().__init__(**kwargs) self._invocation: Invocation | None = None - self._label = label - @property - def label(self) -> str: - """ - Returns a human-readable label for this stream. - If no label is provided and the stream is generated by an operation, - the label of the operation is used. - Otherwise, the class name is used as the label. - """ - if self._label is None: - if self.invocation is not None: - # use the invocation operation label - return self.invocation.kernel.label - else: - return self.__class__.__name__ - return self._label - - @label.setter - def label(self, label: str) -> None: - """ - Sets a human-readable label for this stream. - """ - if not isinstance(label, str): - raise TypeError("label must be a string") - self._label = label + def computed_label(self) -> str | None: + if self.invocation is not None: + # use the invocation operation label + return self.invocation.kernel.label + return None @property def invocation(self) -> Invocation | None: @@ -329,7 +333,7 @@ def flow(self) -> Collection[tuple[Tag, Packet]]: Flow everything through the stream, returning the entire collection of (Tag, Packet) as a collection. This will tigger any upstream computation of the stream. 
""" - return list(self) + return [e for e in self] # --------------------- Recursive methods --------------------------- # These methods form a step in the multi-class recursive invocation that follows the pattern of @@ -365,6 +369,8 @@ def keys( tag_keys, packet_keys = self.invocation.keys() if tag_keys is not None and packet_keys is not None: return tag_keys, packet_keys + if not trigger_run: + return None, None # otherwise, use the keys from the first packet in the stream # note that this may be computationally expensive tag, packet = next(iter(self)) @@ -386,10 +392,14 @@ def types(self, *, trigger_run=False) -> tuple[TypeSpec | None, TypeSpec | None] tag_types, packet_types = self.invocation.types() if not trigger_run or (tag_types is not None and packet_types is not None): return tag_types, packet_types + if not trigger_run: + return None, None # otherwise, use the keys from the first packet in the stream # note that this may be computationally expensive tag, packet = next(iter(self)) - return tag_types or get_typespec(tag), packet_types or get_typespec(packet) + return tag_types or get_typespec_from_dict( + tag + ), packet_types or get_typespec_from_dict(packet) def claims_unique_tags(self, *, trigger_run=False) -> bool | None: """ @@ -432,6 +442,71 @@ def __len__(self) -> int: """ return sum(1 for _ in self) + def join(self, other: "SyncStream", label: str | None = None) -> "SyncStream": + """ + Returns a new stream that is the result of joining with the other stream. + The join is performed on the tags of the packets in the streams. + """ + from .operators import Join + + if not isinstance(other, SyncStream): + raise TypeError("other must be a SyncStream") + return Join(label=label)(self, other) + + def semijoin(self, other: "SyncStream", label: str | None = None) -> "SyncStream": + """ + Returns a new stream that is the result of semijoining with the other stream. + The semijoin is performed on the tags of the packets in the streams. + """ + from .operators import SemiJoin + + if not isinstance(other, SyncStream): + raise TypeError("other must be a SyncStream") + return SemiJoin(label=label)(self, other) + + def map( + self, + packet_map: dict | None = None, + tag_map: dict | None = None, + drop_unmapped: bool = True, + label: str | None = None, + ) -> "SyncStream": + """ + Returns a new stream that is the result of mapping the packets and tags in the stream. + The mapping is applied to each packet in the stream and the resulting packets + are returned in a new stream. + If packet_map is None, no mapping is applied to the packets. + If tag_map is None, no mapping is applied to the tags. + """ + from .operators import MapTags, MapPackets + + output = self + if packet_map is not None: + output = MapPackets(packet_map, drop_unmapped=drop_unmapped, label=label)( + output + ) + if tag_map is not None: + output = MapTags(tag_map, drop_unmapped=drop_unmapped, label=label)(output) + + return output + + def apply(self, transformer: "dict | Operator") -> "SyncStream": + """ + Returns a new stream that is the result of applying the mapping to the stream. + The mapping is applied to each packet in the stream and the resulting packets + are returned in a new stream. 
+ """ + from .operators import MapPackets + + if isinstance(transformer, dict): + return MapPackets(transformer)(self) + elif isinstance(transformer, Operator): + # If the transformer is an Operator, we can apply it directly + return transformer(self) + + # Otherwise, do not know how to handle the transformer + raise TypeError("transformer must be a dictionary or an operator") + def __rshift__( self, transformer: dict | Callable[["SyncStream"], "SyncStream"] ) -> "SyncStream": @@ -440,7 +515,6 @@ def __rshift__( The mapping is applied to each packet in the stream and the resulting packets are returned in a new stream. """ - # TODO: remove just in time import from .operators import MapPackets if isinstance(transformer, dict): @@ -457,7 +531,6 @@ def __mul__(self, other: "SyncStream") -> "SyncStream": """ Returns a new stream that is the result joining with the other stream """ - # TODO: remove just in time import from .operators import Join if not isinstance(other, SyncStream): diff --git a/src/orcapod/core/operators.py b/src/orcapod/core/operators.py index 093167b..5049e8e 100644 --- a/src/orcapod/core/operators.py +++ b/src/orcapod/core/operators.py @@ -3,21 +3,19 @@ from itertools import chain from typing import Any - -from orcapod.core.base import Operator, SyncStream -from orcapod.hashing import function_content_hash, hash_function +from orcapod.types import Packet, Tag, TypeSpec +from orcapod.types.typespec_utils import union_typespecs, intersection_typespecs +from orcapod.core.base import Kernel, SyncStream, Operator from orcapod.core.streams import SyncStreamFromGenerator from orcapod.utils.stream_utils import ( batch_packet, batch_tags, check_packet_compatibility, join_tags, + semijoin_tags, fill_missing, - merge_typespecs, ) -from orcapod.types import Packet, Tag, TypeSpec - class Repeat(Operator): """ @@ -25,8 +23,8 @@ class Repeat(Operator): The repeat count is the number of times to repeat each packet. """ - def __init__(self, repeat_count: int) -> None: - super().__init__() + def __init__(self, repeat_count: int, **kwargs) -> None: + super().__init__(**kwargs) if not isinstance(repeat_count, int): raise TypeError("repeat_count must be an integer") if repeat_count < 0: @@ -187,11 +185,43 @@ def claims_unique_tags( return True +def union_lists(left, right): + if left is None or right is None: + return None + output = list(left) + for item in right: + if item not in output: + output.append(item) + return output + + class Join(Operator): def identity_structure(self, *streams): # Join does not depend on the order of the streams -- convert it onto a set return (self.__class__.__name__, set(streams)) + def keys( + self, *streams: SyncStream, trigger_run=False + ) -> tuple[Collection[str] | None, Collection[str] | None]: + """ + Returns the types of the operation. + The first list contains the keys of the tags, and the second list contains the keys of the packets. + The keys are returned if it is feasible to do so, otherwise a tuple + (None, None) is returned to signify that the keys are not known. 
+ """ + if len(streams) != 2: + raise ValueError("Join operation requires exactly two streams") + + left_stream, right_stream = streams + left_tag_keys, left_packet_keys = left_stream.keys(trigger_run=trigger_run) + right_tag_keys, right_packet_keys = right_stream.keys(trigger_run=trigger_run) + + # TODO: do error handling when merge fails + joined_tag_keys = union_lists(left_tag_keys, right_tag_keys) + joined_packet_keys = union_lists(left_packet_keys, right_packet_keys) + + return joined_tag_keys, joined_packet_keys + def types( self, *streams: SyncStream, trigger_run=False ) -> tuple[TypeSpec | None, TypeSpec | None]: @@ -209,8 +239,8 @@ def types( right_tag_types, right_packet_types = right_stream.types(trigger_run=False) # TODO: do error handling when merge fails - joined_tag_types = merge_typespecs(left_tag_types, right_tag_types) - joined_packet_types = merge_typespecs(left_packet_types, right_packet_types) + joined_tag_types = union_typespecs(left_tag_types, right_tag_types) + joined_packet_types = union_typespecs(left_packet_types, right_packet_types) return joined_tag_types, joined_packet_types @@ -225,14 +255,13 @@ def forward(self, *streams: SyncStream) -> SyncStream: left_stream, right_stream = streams def generator() -> Iterator[tuple[Tag, Packet]]: - for left_tag, left_packet in left_stream: - for right_tag, right_packet in right_stream: + # using list comprehension rather than list() to avoid call to __len__ which is expensive + left_stream_buffered = [e for e in left_stream] + right_stream_buffered = [e for e in right_stream] + for left_tag, left_packet in left_stream_buffered: + for right_tag, right_packet in right_stream_buffered: if (joined_tag := join_tags(left_tag, right_tag)) is not None: - if not check_packet_compatibility(left_packet, right_packet): - raise ValueError( - f"Packets are not compatible: {left_packet} and {right_packet}" - ) - yield joined_tag, {**left_packet, **right_packet} + yield joined_tag, left_packet.join(right_packet) return SyncStreamFromGenerator(generator) @@ -271,7 +300,7 @@ def generator(): ) # match is found - remove the packet from the inner stream inner_stream.pop(idx) - yield joined_tag, {**outer_packet, **inner_packet} + yield joined_tag, Packet({**outer_packet, **inner_packet}) # if enough matches found, move onto the next outer stream packet break @@ -338,8 +367,8 @@ def types( ): return super().types(*streams, trigger_run=trigger_run) - joined_tag_types = merge_typespecs(left_tag_types, right_tag_types) - joined_packet_types = merge_typespecs(left_packet_types, right_packet_types) + joined_tag_types = union_typespecs(left_tag_types, right_tag_types) + joined_packet_types = union_typespecs(left_packet_types, right_packet_types) return joined_tag_types, joined_packet_types @@ -352,8 +381,10 @@ class MapPackets(Operator): drop_unmapped=False, in which case unmapped keys will be retained. 
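The `union_lists` helper added above gives `Join.keys()` an order-preserving union of the two streams' key collections, with `None` (unknown keys) propagating. For example:

    from orcapod.core.operators import union_lists

    assert union_lists(["id"], ["id"]) == ["id"]
    assert union_lists(["x", "y"], ["y", "word"]) == ["x", "y", "word"]
    assert union_lists(["x"], None) is None  # unknown keys on either side stay unknown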
""" - def __init__(self, key_map: dict[str, str], drop_unmapped: bool = True) -> None: - super().__init__() + def __init__( + self, key_map: dict[str, str], drop_unmapped: bool = True, **kwargs + ) -> None: + super().__init__(**kwargs) self.key_map = key_map self.drop_unmapped = drop_unmapped @@ -365,13 +396,7 @@ def forward(self, *streams: SyncStream) -> SyncStream: def generator(): for tag, packet in stream: - if self.drop_unmapped: - packet = { - v: packet[k] for k, v in self.key_map.items() if k in packet - } - else: - packet = {self.key_map.get(k, k): v for k, v in packet.items()} - yield tag, packet + yield tag, packet.map_keys(self.key_map, self.drop_unmapped) return SyncStreamFromGenerator(generator) @@ -400,7 +425,12 @@ def keys( stream = streams[0] tag_keys, packet_keys = stream.keys(trigger_run=trigger_run) if tag_keys is None or packet_keys is None: - return super().keys(trigger_run=trigger_run) + super_tag_keys, super_packet_keys = super().keys(trigger_run=trigger_run) + tag_keys = tag_keys or super_tag_keys + packet_keys = packet_keys or super_packet_keys + + if packet_keys is None: + return tag_keys, packet_keys if self.drop_unmapped: # If drop_unmapped is True, we only keep the keys that are in the mapping @@ -426,7 +456,12 @@ def types( stream = streams[0] tag_types, packet_types = stream.types(trigger_run=trigger_run) if tag_types is None or packet_types is None: - return super().types(trigger_run=trigger_run) + super_tag_types, super_packet_types = super().types(trigger_run=trigger_run) + tag_types = tag_types or super_tag_types + packet_types = packet_types or super_packet_types + + if packet_types is None: + return tag_types, packet_types if self.drop_unmapped: # If drop_unmapped is True, we only keep the keys that are in the mapping @@ -448,8 +483,8 @@ class DefaultTag(Operator): tag already contains the same key, it will not be overwritten. """ - def __init__(self, default_tag: Tag) -> None: - super().__init__() + def __init__(self, default_tag: Tag, **kwargs) -> None: + super().__init__(**kwargs) self.default_tag = default_tag def forward(self, *streams: SyncStream) -> SyncStream: @@ -494,8 +529,10 @@ class MapTags(Operator): drop_unmapped=False, in which case unmapped tags will be retained. 
""" - def __init__(self, key_map: dict[str, str], drop_unmapped: bool = True) -> None: - super().__init__() + def __init__( + self, key_map: dict[str, str], drop_unmapped: bool = True, **kwargs + ) -> None: + super().__init__(**kwargs) self.key_map = key_map self.drop_unmapped = drop_unmapped @@ -551,6 +588,73 @@ def keys( return mapped_tag_keys, packet_keys +class SemiJoin(Operator): + """ + Perform semi-join on the left stream tags with the tags of the right stream + """ + + def identity_structure(self, *streams): + # Restrict DOES depend on the order of the streams -- maintain as a tuple + return (self.__class__.__name__,) + streams + + def keys( + self, *streams: SyncStream, trigger_run=False + ) -> tuple[Collection[str] | None, Collection[str] | None]: + """ + For semijoin, output keys and types are identical to left stream + """ + if len(streams) != 2: + raise ValueError("Join operation requires exactly two streams") + + return streams[0].keys(trigger_run=trigger_run) + + def types( + self, *streams: SyncStream, trigger_run=False + ) -> tuple[TypeSpec | None, TypeSpec | None]: + """ + For semijoin, output keys and types are identical to left stream + """ + if len(streams) != 2: + raise ValueError("Join operation requires exactly two streams") + + return streams[0].types(trigger_run=trigger_run) + + def forward(self, *streams: SyncStream) -> SyncStream: + """ + Joins two streams together based on their tags. + The resulting stream will contain all the tags from both streams. + """ + if len(streams) != 2: + raise ValueError("Join operation requires exactly two streams") + + left_stream, right_stream = streams + left_tag_typespec, left_packet_typespec = left_stream.types() + right_tag_typespec, right_packet_typespec = right_stream.types() + + common_tag_typespec = intersection_typespecs( + left_tag_typespec, right_tag_typespec + ) + common_tag_keys = None + if common_tag_typespec is not None: + common_tag_keys = list(common_tag_typespec.keys()) + + def generator() -> Iterator[tuple[Tag, Packet]]: + # using list comprehension rather than list() to avoid call to __len__ which is expensive + left_stream_buffered = [e for e in left_stream] + right_stream_buffered = [e for e in right_stream] + for left_tag, left_packet in left_stream_buffered: + for right_tag, _ in right_stream_buffered: + if semijoin_tags(left_tag, right_tag, common_tag_keys) is not None: + yield left_tag, left_packet + # move onto next entry + break + + return SyncStreamFromGenerator(generator) + + def __repr__(self) -> str: + return "SemiJoin()" + + class Filter(Operator): """ A Mapper that filters the packets in the stream based on a predicate function. @@ -558,8 +662,8 @@ class Filter(Operator): The predicate function should return True for packets that should be kept and False for packets that should be dropped. """ - def __init__(self, predicate: Callable[[Tag, Packet], bool]): - super().__init__() + def __init__(self, predicate: Callable[[Tag, Packet], bool], **kwargs): + super().__init__(**kwargs) self.predicate = predicate def forward(self, *streams: SyncStream) -> SyncStream: @@ -604,8 +708,10 @@ class Transform(Operator): The transformation function should return a tuple of (new_tag, new_packet). 
""" - def __init__(self, transform: Callable[[Tag, Packet], tuple[Tag, Packet]]): - super().__init__() + def __init__( + self, transform: Callable[[Tag, Packet], tuple[Tag, Packet]], **kwargs + ): + super().__init__(**kwargs) self.transform = transform def forward(self, *streams: SyncStream) -> SyncStream: @@ -642,8 +748,9 @@ def __init__( batch_size: int, tag_processor: None | Callable[[Collection[Tag]], Tag] = None, drop_last: bool = True, + **kwargs, ): - super().__init__() + super().__init__(**kwargs) self.batch_size = batch_size if tag_processor is None: tag_processor = batch_tags # noqa: E731 @@ -706,8 +813,9 @@ def __init__( reduce_keys: bool = False, selection_function: Callable[[Collection[tuple[Tag, Packet]]], Collection[bool]] | None = None, + **kwargs, ) -> None: - super().__init__() + super().__init__(**kwargs) self.group_keys = group_keys self.reduce_keys = reduce_keys self.selection_function = selection_function @@ -753,9 +861,9 @@ def generator() -> Iterator[tuple[Tag, Packet]]: if k not in new_tag: new_tag[k] = [t.get(k, None) for t, _ in packets] # combine all packets into a single packet - combined_packet: Packet = { - k: [p.get(k, None) for _, p in packets] for k in packet_keys - } + combined_packet: Packet = Packet( + {k: [p.get(k, None) for _, p in packets] for k in packet_keys} + ) yield new_tag, combined_packet return SyncStreamFromGenerator(generator) @@ -775,8 +883,8 @@ class CacheStream(Operator): Call `clear_cache()` to clear the cache. """ - def __init__(self) -> None: - super().__init__() + def __init__(self, **kwargs) -> None: + super().__init__(**kwargs) self.cache: list[tuple[Tag, Packet]] = [] self.is_cached = False diff --git a/src/orcapod/core/pod.py b/src/orcapod/core/pod.py new file mode 100644 index 0000000..3ca7d6b --- /dev/null +++ b/src/orcapod/core/pod.py @@ -0,0 +1,335 @@ +import logging +import warnings +import sys +from collections.abc import Callable, Collection, Iterable, Iterator, Sequence +from typing import ( + Any, + Literal, +) + +from orcapod.types import Packet, Tag, TypeSpec, default_registry +from orcapod.types.typespec_utils import ( + extract_function_typespecs, + check_typespec_compatibility, +) +from orcapod.types.legacy.packets import PacketConverter + +from orcapod.hashing import ( + FunctionInfoExtractor, +) +from orcapod.hashing.legacy_core import get_function_signature +from orcapod.core import Kernel +from orcapod.core.operators import Join +from orcapod.core.streams import ( + SyncStream, + SyncStreamFromGenerator, +) + +logger = logging.getLogger(__name__) + + +class Pod(Kernel): + """ + An (abstract) base class for all pods. A pod can be seen as a special type of operation that + only operates on the packet content without reading tags. Consequently, no operation + of Pod can dependent on the tags of the packets. This is a design choice to ensure that + the pods act as pure functions which is a necessary condition to guarantee reproducibility. + """ + + def __init__( + self, error_handling: Literal["raise", "ignore", "warn"] = "raise", **kwargs + ): + super().__init__(**kwargs) + self._active = True + self.error_handling = error_handling + + def is_active(self) -> bool: + """ + Check if the pod is active. If not, it will not process any packets. + """ + return self._active + + def set_active(self, active: bool) -> None: + """ + Set the active state of the pod. If set to False, the pod will not process any packets. 
+ """ + self._active = active + + def process_stream(self, *streams: SyncStream) -> tuple[SyncStream, ...]: + """ + Prepare the incoming streams for execution in the pod. This default implementation + joins all the input streams together. + """ + # if multiple streams are provided, join them + # otherwise, return as is + combined_streams = list(streams) + if len(streams) > 1: + stream = streams[0] + for next_stream in streams[1:]: + stream = Join()(stream, next_stream) + combined_streams = [stream] + return tuple(combined_streams) + + def pre_forward_hook( + self, *streams: SyncStream, **kwargs + ) -> tuple[SyncStream, ...]: + return self.process_stream(*streams) + + def generator_completion_hook(self, n_computed: int) -> None: + """ + Hook that is called when the generator is completed. This can be used to + perform any finalization steps, such as closing resources or logging. + """ + logger.debug(f"Generator completed with {n_computed} items processed.") + + def forward(self, *streams: SyncStream) -> SyncStream: + # at this point, streams should have been joined into one + assert len(streams) == 1, "Only one stream is supported in forward() of Pod" + stream = streams[0] + + def generator() -> Iterator[tuple[Tag, Packet]]: + n_computed = 0 + for tag, packet in stream: + try: + tag, output_packet = self.call(tag, packet) + if output_packet is None: + logger.debug( + f"Call returned None as output for tag {tag}. Skipping..." + ) + continue + n_computed += 1 + logger.debug(f"Computed item {n_computed}") + yield tag, output_packet + + except Exception as e: + logger.error(f"Error processing packet {packet}: {e}") + if self.error_handling == "raise": + raise e + elif self.error_handling == "warn": + warnings.warn(f"Error processing packet {packet}: {e}") + continue + elif self.error_handling == "ignore": + continue + else: + raise ValueError( + f"Unknown error handling mode: {self.error_handling} encountered while handling error:" + ) from e + self.generator_completion_hook(n_computed) + + return SyncStreamFromGenerator(generator) + + def call(self, tag: Tag, packet: Packet) -> tuple[Tag, Packet | None]: ... + + +def function_pod( + output_keys: str | Collection[str] | None = None, + function_name: str | None = None, + label: str | None = None, + **kwargs, +) -> Callable[..., "FunctionPod"]: + """ + Decorator that wraps a function in a FunctionPod instance. + + Args: + output_keys: Keys for the function output(s) + function_name: Name of the function pod; if None, defaults to the function name + **kwargs: Additional keyword arguments to pass to the FunctionPod constructor. Please refer to the FunctionPod documentation for details. 
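Because `process_stream` joins all incoming streams before `forward()` runs, a pod with several upstream inputs behaves as if it received one stream joined on the shared tag keys. Conceptually (sketch; `some_pod`, `left`, and `right` are illustrative, and it is assumed the base Kernel invokes `pre_forward_hook` ahead of `forward`):

    from orcapod.core.operators import Join

    out_a = some_pod(left, right)          # the two streams are joined internally
    out_b = some_pod(Join()(left, right))  # equivalent explicit join, single-stream call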
+ + Returns: + FunctionPod instance wrapping the decorated function + """ + + def decorator(func) -> FunctionPod: + if func.__name__ == "": + raise ValueError("Lambda functions cannot be used with function_pod") + + if not hasattr(func, "__module__") or func.__module__ is None: + raise ValueError( + f"Function {func.__name__} must be defined at module level" + ) + + # Store the original function in the module for pickling purposes + # and make sure to change the name of the function + module = sys.modules[func.__module__] + base_function_name = func.__name__ + new_function_name = f"_original_{func.__name__}" + setattr(module, new_function_name, func) + # rename the function to be consistent and make it pickleable + setattr(func, "__name__", new_function_name) + setattr(func, "__qualname__", new_function_name) + + # Create a simple typed function pod + pod = FunctionPod( + function=func, + output_keys=output_keys, + function_name=function_name or base_function_name, + label=label, + **kwargs, + ) + return pod + + return decorator + + +class FunctionPod(Pod): + def __init__( + self, + function: Callable[..., Any], + output_keys: str | Collection[str] | None = None, + function_name=None, + input_types: TypeSpec | None = None, + output_types: TypeSpec | Sequence[type] | None = None, + label: str | None = None, + packet_type_registry=None, + function_info_extractor: FunctionInfoExtractor | None = None, + **kwargs, + ) -> None: + self.function = function + if output_keys is None: + output_keys = [] + if isinstance(output_keys, str): + output_keys = [output_keys] + self.output_keys = output_keys + if function_name is None: + if hasattr(self.function, "__name__"): + function_name = getattr(self.function, "__name__") + else: + raise ValueError( + "function_name must be provided if function has no __name__ attribute" + ) + self.function_name = function_name + super().__init__(label=label or self.function_name, **kwargs) + + if packet_type_registry is None: + # TODO: reconsider the use of default registry here + packet_type_registry = default_registry + + self.registry = packet_type_registry + self.function_info_extractor = function_info_extractor + + # extract input and output types from the function signature + self.function_input_typespec, self.function_output_typespec = ( + extract_function_typespecs( + self.function, + self.output_keys, + input_types=input_types, + output_types=output_types, + ) + ) + + self.input_converter = PacketConverter( + self.function_input_typespec, self.registry + ) + self.output_converter = PacketConverter( + self.function_output_typespec, self.registry + ) + + def forward(self, *streams: SyncStream, **kwargs) -> SyncStream: + assert len(streams) == 1, ( + "Only one stream is supported in forward() of FunctionPod" + ) + stream = streams[0] + _, packet_typespec = stream.types(trigger_run=False) + if packet_typespec is not None and not check_typespec_compatibility( + packet_typespec, self.function_input_typespec + ): + raise TypeError( + f"Input packet types {packet_typespec} is not compatible with the function's expected input types {self.function_input_typespec}" + ) + return super().forward(*streams, **kwargs) + + def get_function_typespecs(self) -> tuple[TypeSpec, TypeSpec]: + return self.function_input_typespec, self.function_output_typespec + + def __repr__(self) -> str: + return f"FunctionPod:{self.function!r}" + + def __str__(self) -> str: + include_module = self.function.__module__ != "__main__" + func_sig = get_function_signature( + self.function, + 
name_override=self.function_name, + include_module=include_module, + ) + return f"FunctionPod:{func_sig}" + + def call(self, tag, packet) -> tuple[Tag, Packet | None]: + if not self.is_active(): + logger.info( + f"Pod is not active: skipping computation on input packet {packet}" + ) + return tag, None + output_values = [] + + values = self.function(**packet) + + if len(self.output_keys) == 0: + output_values = [] + elif len(self.output_keys) == 1: + output_values = [values] # type: ignore + elif isinstance(values, Iterable): + output_values = list(values) # type: ignore + elif len(self.output_keys) > 1: + raise ValueError( + "Values returned by function must be a pathlike or a sequence of pathlikes" + ) + + if len(output_values) != len(self.output_keys): + raise ValueError( + f"Number of output keys {len(self.output_keys)}:{self.output_keys} does not match number of values returned by function {len(output_values)}" + ) + + output_packet: Packet = Packet( + {k: v for k, v in zip(self.output_keys, output_values)} + ) + return tag, output_packet + + def identity_structure(self, *streams) -> Any: + # construct identity structure for the function + # if function_info_extractor is available, use that but substitute the function_name + if self.function_info_extractor is not None: + function_info = self.function_info_extractor.extract_function_info( + self.function, + function_name=self.function_name, + input_typespec=self.function_input_typespec, + output_typespec=self.function_output_typespec, + ) + else: + # use basic information only + function_info = { + "name": self.function_name, + "input_typespec": self.function_input_typespec, + "output_typespec": self.function_output_typespec, + } + function_info["output_keys"] = tuple(self.output_keys) + + return ( + self.__class__.__name__, + function_info, + ) + streams + + def keys( + self, *streams: SyncStream, trigger_run: bool = False + ) -> tuple[Collection[str] | None, Collection[str] | None]: + stream = self.process_stream(*streams) + if len(stream) < 1: + tag_keys = None + else: + tag_keys, _ = stream[0].keys(trigger_run=trigger_run) + return tag_keys, tuple(self.output_keys) + + def types( + self, *streams: SyncStream, trigger_run: bool = False + ) -> tuple[TypeSpec | None, TypeSpec | None]: + stream = self.process_stream(*streams) + if len(stream) < 1: + tag_typespec = None + else: + tag_typespec, _ = stream[0].types(trigger_run=trigger_run) + return tag_typespec, self.function_output_typespec + + def claims_unique_tags( + self, *streams: SyncStream, trigger_run: bool = False + ) -> bool | None: + stream = self.process_stream(*streams) + return stream[0].claims_unique_tags(trigger_run=trigger_run) diff --git a/src/orcapod/core/pod_legacy.py b/src/orcapod/core/pod_legacy.py new file mode 100644 index 0000000..18099c6 --- /dev/null +++ b/src/orcapod/core/pod_legacy.py @@ -0,0 +1,373 @@ +import logging +import warnings +import sys +from collections.abc import Callable, Collection, Iterable, Iterator +from typing import ( + Any, + Literal, +) + +from orcapod.types import Packet, PathSet, PodFunction, Tag + +from orcapod.hashing import ( + get_function_signature, + hash_function, +) +from orcapod.core.base import Kernel +from orcapod.core.operators import Join +from orcapod.core.streams import SyncStream, SyncStreamFromGenerator +from orcapod.stores import DataStore, NoOpDataStore + + +logger = logging.getLogger(__name__) + + +class Pod(Kernel): + """ + An (abstract) base class for all pods. 
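How a function's return value becomes the output packet depends on `output_keys`: an empty list discards the return value, a single key wraps the raw return value, and several keys expect an iterable of matching length (a mismatch raises `ValueError`). Sketch, assuming the typespec extraction accepts a tuple return annotation for multiple outputs:

    import orcapod as op

    @op.function_pod(("quotient", "remainder"))
    def div(x: int, y: int) -> tuple[int, int]:
        return divmod(x, y)  # two values -> two output keys

    # For an input packet {"x": 7, "y": 2} the output packet is
    # {"quotient": 3, "remainder": 1}.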
A pod can be seen as a special type of operation that + only operates on the packet content without reading tags. Consequently, no operation + of Pod can dependent on the tags of the packets. This is a design choice to ensure that + the pods act as pure functions which is a necessary condition to guarantee reproducibility. + """ + + def __init__( + self, error_handling: Literal["raise", "ignore", "warn"] = "raise", **kwargs + ): + super().__init__(**kwargs) + self.error_handling = error_handling + self._active = True + + def set_active(self, active=True): + self._active = active + + def is_active(self) -> bool: + return self._active + + def process_stream(self, *streams: SyncStream) -> tuple[SyncStream, ...]: + """ + Prepare the incoming streams for execution in the pod. This default implementation + joins all the streams together and raises and error if no streams are provided. + """ + # if multiple streams are provided, join them + # otherwise, return as is + combined_streams = list(streams) + if len(streams) > 1: + stream = streams[0] + for next_stream in streams[1:]: + stream = Join()(stream, next_stream) + combined_streams = [stream] + return tuple(combined_streams) + + def pre_forward_hook( + self, *streams: SyncStream, **kwargs + ) -> tuple[SyncStream, ...]: + return self.process_stream(*streams) + + def forward(self, *streams: SyncStream) -> SyncStream: + # if multiple streams are provided, join them + if len(streams) > 1: + raise ValueError("Multiple streams should be joined before calling forward") + if len(streams) == 0: + raise ValueError("No streams provided to forward") + stream = streams[0] + + def generator() -> Iterator[tuple[Tag, Packet]]: + n_computed = 0 + for tag, packet in stream: + try: + tag, output_packet = self.call(tag, packet) + if output_packet is None: + logger.info( + f"Call returned None as output for tag {tag}. Skipping..." + ) + continue + n_computed += 1 + logger.info(f"Computed item {n_computed}") + yield tag, output_packet + + except Exception as e: + logger.error(f"Error processing packet {packet}: {e}") + if self.error_handling == "raise": + raise e + elif self.error_handling == "warn": + warnings.warn(f"Error processing packet {packet}: {e}") + continue + elif self.error_handling == "ignore": + continue + else: + raise ValueError( + f"Unknown error handling mode: {self.error_handling} encountered while handling error:" + ) from e + + return SyncStreamFromGenerator(generator) + + def call(self, tag: Tag, packet: Packet) -> tuple[Tag, Packet | None]: ... + + +def function_pod( + output_keys: Collection[str] | None = None, + function_name: str | None = None, + data_store: DataStore | None = None, + store_name: str | None = None, + function_hash_mode: Literal["signature", "content", "name", "custom"] = "name", + custom_hash: int | None = None, + force_computation: bool = False, + skip_memoization: bool = False, + error_handling: Literal["raise", "ignore", "warn"] = "raise", + **kwargs, +) -> Callable[..., "FunctionPod"]: + """ + Decorator that wraps a function in a FunctionPod instance. 
+ + Args: + output_keys: Keys for the function output + force_computation: Whether to force computation + skip_memoization: Whether to skip memoization + + Returns: + FunctionPod instance wrapping the decorated function + """ + + def decorator(func) -> FunctionPod: + if func.__name__ == "": + raise ValueError("Lambda functions cannot be used with function_pod") + + if not hasattr(func, "__module__") or func.__module__ is None: + raise ValueError( + f"Function {func.__name__} must be defined at module level" + ) + + # Store the original function in the module for pickling purposes + # and make sure to change the name of the function + module = sys.modules[func.__module__] + base_function_name = func.__name__ + new_function_name = f"_original_{func.__name__}" + setattr(module, new_function_name, func) + # rename the function to be consistent and make it pickleable + setattr(func, "__name__", new_function_name) + setattr(func, "__qualname__", new_function_name) + + # Create the FunctionPod + pod = FunctionPod( + function=func, + output_keys=output_keys, + function_name=function_name or base_function_name, + data_store=data_store, + store_name=store_name, + function_hash_mode=function_hash_mode, + custom_hash=custom_hash, + force_computation=force_computation, + skip_memoization=skip_memoization, + error_handling=error_handling, + **kwargs, + ) + + return pod + + return decorator + + +class FunctionPod(Pod): + """ + A pod that wraps a function and allows it to be used as an operation in a stream. + This pod can be used to apply a function to the packets in a stream, with optional memoization + and caching of results. It can also handle multiple output keys and error handling. + The function should accept keyword arguments that correspond to the keys in the packets. 
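The legacy decorator keeps the memoization-oriented knobs. A sketch of wiring a pod to a data store under this API (`my_store` is an assumed `DataStore` instance; `NoOpDataStore` is used when none is given):

    from orcapod.core.pod_legacy import function_pod

    @function_pod(
        output_keys=["result_path"],
        data_store=my_store,             # assumed DataStore instance
        function_hash_mode="signature",  # pod identity derived from the function signature
        force_computation=False,         # reuse memoized outputs when available
        error_handling="warn",
    )
    def compute(input_path: str) -> str:
        ...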
+ The output of the function should be a path or a collection of paths that correspond to the output keys.""" + + def __init__( + self, + function: PodFunction, + output_keys: Collection[str] | None = None, + function_name=None, + data_store: DataStore | None = None, + store_name: str | None = None, + function_hash_mode: Literal["signature", "content", "name", "custom"] = "name", + custom_hash: int | None = None, + label: str | None = None, + force_computation: bool = False, + skip_memoization_lookup: bool = False, + skip_memoization: bool = False, + error_handling: Literal["raise", "ignore", "warn"] = "raise", + _hash_function_kwargs: dict | None = None, + **kwargs, + ) -> None: + super().__init__(label=label, **kwargs) + self.function = function + self.output_keys = output_keys or [] + if function_name is None: + if hasattr(self.function, "__name__"): + function_name = getattr(self.function, "__name__") + else: + raise ValueError( + "function_name must be provided if function has no __name__ attribute" + ) + + self.function_name = function_name + self.data_store = data_store if data_store is not None else NoOpDataStore() + self.store_name = store_name or function_name + self.function_hash_mode = function_hash_mode + self.custom_hash = custom_hash + self.force_computation = force_computation + self.skip_memoization_lookup = skip_memoization_lookup + self.skip_memoization = skip_memoization + self.error_handling = error_handling + self._hash_function_kwargs = _hash_function_kwargs + + def __repr__(self) -> str: + func_sig = get_function_signature(self.function) + return f"FunctionPod:{func_sig} ⇒ {self.output_keys}" + + def keys( + self, *streams: SyncStream, trigger_run: bool = False + ) -> tuple[Collection[str] | None, Collection[str] | None]: + stream = self.process_stream(*streams) + tag_keys, _ = stream[0].keys(trigger_run=trigger_run) + return tag_keys, tuple(self.output_keys) + + def is_memoized(self, packet: Packet) -> bool: + return self.retrieve_memoized(packet) is not None + + def retrieve_memoized(self, packet: Packet) -> Packet | None: + """ + Retrieve a memoized packet from the data store. + Returns None if no memoized packet is found. + """ + return self.data_store.retrieve_memoized( + self.store_name, + self.content_hash(char_count=16), + packet, + ) + + def memoize( + self, + packet: Packet, + output_packet: Packet, + ) -> Packet: + """ + Memoize the output packet in the data store. + Returns the memoized packet. 
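Memoization is keyed on the store name, a 16-character content hash identifying this pod, and the input packet, so identical inputs to an identical pod resolve to the same stored output. A conceptual sketch of the per-packet lookup performed in `forward()` (variable names illustrative):

    cached = pod.retrieve_memoized(input_packet)  # None on a cache miss
    if cached is None:
        output_packet = run_wrapped_function(input_packet)        # compute a fresh result
        output_packet = pod.memoize(input_packet, output_packet)  # store; the packet may be rewritten (e.g. file paths)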
+ """ + return self.data_store.memoize( + self.store_name, + self.content_hash(char_count=16), # identity of this function pod + packet, + output_packet, + ) + + def forward(self, *streams: SyncStream) -> SyncStream: + # if multiple streams are provided, join them + if len(streams) > 1: + raise ValueError("Multiple streams should be joined before calling forward") + if len(streams) == 0: + raise ValueError("No streams provided to forward") + stream = streams[0] + + def generator() -> Iterator[tuple[Tag, Packet]]: + n_computed = 0 + for tag, packet in stream: + output_values: list["PathSet"] = [] + try: + if not self.skip_memoization_lookup: + memoized_packet = self.retrieve_memoized(packet) + else: + memoized_packet = None + if not self.force_computation and memoized_packet is not None: + logger.info("Memoized packet found, skipping computation") + yield tag, memoized_packet + continue + if not self.is_active(): + logger.info( + "Pod is not active: skipping computation of a new entry" + ) + continue + values = self.function(**packet) + + if len(self.output_keys) == 0: + output_values = [] + elif len(self.output_keys) == 1: + output_values = [values] # type: ignore + elif isinstance(values, Iterable): + output_values = list(values) # type: ignore + elif len(self.output_keys) > 1: + raise ValueError( + "Values returned by function must be a pathlike or a sequence of pathlikes" + ) + + if len(output_values) != len(self.output_keys): + raise ValueError( + f"Number of output keys {len(self.output_keys)}:{self.output_keys} does not match number of values returned by function {len(output_values)}" + ) + except Exception as e: + logger.error(f"Error processing packet {packet}: {e}") + if self.error_handling == "raise": + raise e + elif self.error_handling == "ignore": + continue + elif self.error_handling == "warn": + warnings.warn(f"Error processing packet {packet}: {e}") + continue + else: + raise ValueError( + f"Unknown error handling mode: {self.error_handling} encountered while handling error:" + ) from e + + output_packet: Packet = { + k: v for k, v in zip(self.output_keys, output_values) + } + + if not self.skip_memoization: + # output packet may be modified by the memoization process + # e.g. 
if the output is a file, the path may be changed + output_packet = self.memoize(packet, output_packet) # type: ignore + + n_computed += 1 + logger.info(f"Computed item {n_computed}") + yield tag, output_packet + + return SyncStreamFromGenerator(generator) + + def identity_structure(self, *streams) -> Any: + content_kwargs = self._hash_function_kwargs + if self.function_hash_mode == "content": + if content_kwargs is None: + content_kwargs = { + "include_name": False, + "include_module": False, + "include_declaration": False, + } + function_hash_value = hash_function( + self.function, + name_override=self.function_name, + function_hash_mode="content", + content_kwargs=content_kwargs, + ) + elif self.function_hash_mode == "signature": + function_hash_value = hash_function( + self.function, + name_override=self.function_name, + function_hash_mode="signature", + content_kwargs=content_kwargs, + ) + elif self.function_hash_mode == "name": + function_hash_value = hash_function( + self.function, + name_override=self.function_name, + function_hash_mode="name", + content_kwargs=content_kwargs, + ) + elif self.function_hash_mode == "custom": + if self.custom_hash is None: + raise ValueError("Custom hash function not provided") + function_hash_value = self.custom_hash + else: + raise ValueError( + f"Unknown function hash mode: {self.function_hash_mode}. " + "Must be one of 'content', 'signature', 'name', or 'custom'." + ) + + return ( + self.__class__.__name__, + function_hash_value, + tuple(self.output_keys), + ) + tuple(streams) diff --git a/src/orcapod/core/sources.py b/src/orcapod/core/sources.py index 33df20d..b1dca7d 100644 --- a/src/orcapod/core/sources.py +++ b/src/orcapod/core/sources.py @@ -3,10 +3,17 @@ from pathlib import Path from typing import Any, Literal +import polars as pl + from orcapod.core.base import Source -from orcapod.hashing import hash_function -from orcapod.core.streams import SyncStream, SyncStreamFromGenerator -from orcapod.types import Packet, Tag +from orcapod.hashing.legacy_core import hash_function +from orcapod.core.streams import ( + PolarsStream, + SyncStream, + SyncStreamFromGenerator, + StreamWrapper, +) +from orcapod.types import Packet, Tag, TypeSpec class GlobSource(Source): @@ -78,7 +85,7 @@ def forward(self, *streams: SyncStream) -> SyncStream: def generator() -> Iterator[tuple[Tag, Packet]]: for file in Path(self.file_path).glob(self.pattern): - yield self.tag_function(file), {self.name: str(file)} + yield self.tag_function(file), Packet({self.name: str(file)}) return SyncStreamFromGenerator(generator) @@ -139,3 +146,59 @@ def claims_unique_tags( return True # Otherwise, delegate to the base class return super().claims_unique_tags(trigger_run=trigger_run) + + +class PolarsSource(Source): + def __init__( + self, + df: pl.DataFrame, + tag_keys: Collection[str], + packet_keys: Collection[str] | None = None, + ): + self.df = df + self.tag_keys = tag_keys + self.packet_keys = packet_keys + + def forward(self, *streams: SyncStream, **kwargs) -> SyncStream: + if len(streams) != 0: + raise ValueError( + "PolarsSource does not support forwarding streams. " + "It generates its own stream from the DataFrame." 
+ ) + return PolarsStream(self.df, self.tag_keys, self.packet_keys) + + +class StreamSource(Source): + def __init__(self, stream: SyncStream, **kwargs): + super().__init__(skip_tracking=True, **kwargs) + self.stream = stream + + def forward(self, *streams: SyncStream) -> SyncStream: + if len(streams) != 0: + raise ValueError( + "StreamSource does not support forwarding streams. " + "It generates its own stream from the file system." + ) + return StreamWrapper(self.stream) + + def identity_structure(self, *streams) -> Any: + if len(streams) != 0: + raise ValueError( + "StreamSource does not support forwarding streams. " + "It generates its own stream from the file system." + ) + + return (self.__class__.__name__, self.stream) + + def types( + self, *streams: SyncStream, **kwargs + ) -> tuple[TypeSpec | None, TypeSpec | None]: + return self.stream.types() + + def keys( + self, *streams: SyncStream, **kwargs + ) -> tuple[Collection[str] | None, Collection[str] | None]: + return self.stream.keys() + + def computed_label(self) -> str | None: + return self.stream.label diff --git a/src/orcapod/core/streams.py b/src/orcapod/core/streams.py index 77cdbe3..170c80d 100644 --- a/src/orcapod/core/streams.py +++ b/src/orcapod/core/streams.py @@ -1,31 +1,43 @@ from collections.abc import Callable, Collection, Iterator +import polars as pl + from orcapod.core.base import SyncStream -from orcapod.types import Packet, Tag +from orcapod.types import Packet, PacketLike, Tag, TypeSpec +from copy import copy class SyncStreamFromLists(SyncStream): def __init__( self, tags: Collection[Tag] | None = None, - packets: Collection[Packet] | None = None, - paired: Collection[tuple[Tag, Packet]] | None = None, + packets: Collection[PacketLike] | None = None, + paired: Collection[tuple[Tag, PacketLike]] | None = None, tag_keys: list[str] | None = None, packet_keys: list[str] | None = None, + tag_typespec: TypeSpec | None = None, + packet_typespec: TypeSpec | None = None, strict: bool = True, **kwargs, ) -> None: super().__init__(**kwargs) + self.tag_typespec = tag_typespec + self.packet_typespec = packet_typespec + if tag_keys is None and tag_typespec is not None: + tag_keys = list(tag_typespec.keys()) + if packet_keys is None and packet_typespec is not None: + packet_keys = list(packet_typespec.keys()) self.tag_keys = tag_keys self.packet_keys = packet_keys + if tags is not None and packets is not None: if strict and len(tags) != len(packets): raise ValueError( "tags and packets must have the same length if both are provided" ) - self.paired = list(zip(tags, packets)) + self.paired = list((t, Packet(v)) for t, v in zip(tags, packets)) elif paired is not None: - self.paired = list(paired) + self.paired = list((t, Packet(v)) for t, v in paired) else: raise ValueError( "Either tags and packets or paired must be provided to SyncStreamFromLists" @@ -34,10 +46,31 @@ def __init__( def keys( self, *, trigger_run: bool = False ) -> tuple[Collection[str] | None, Collection[str] | None]: - if self.tag_keys is None or self.packet_keys is None: - return super().keys(trigger_run=trigger_run) + tag_keys, packet_keys = copy(self.tag_keys), copy(self.packet_keys) + if tag_keys is None or packet_keys is None: + super_tag_keys, super_packet_keys = super().keys(trigger_run=trigger_run) + tag_keys = tag_keys or super_tag_keys + packet_keys = packet_keys or super_packet_keys + # If the keys are already set, return them - return self.tag_keys.copy(), self.packet_keys.copy() + return tag_keys, packet_keys + + def types( + self, *, 
trigger_run: bool = False + ) -> tuple[TypeSpec | None, TypeSpec | None]: + tag_typespec, packet_typespec = ( + copy(self.tag_typespec), + copy(self.packet_typespec), + ) + if tag_typespec is None or packet_typespec is None: + super_tag_typespec, super_packet_typespec = super().types( + trigger_run=trigger_run + ) + tag_typespec = tag_typespec or super_tag_typespec + packet_typespec = packet_typespec or super_packet_typespec + + # If the types are already set, return them + return tag_typespec, packet_typespec def __iter__(self) -> Iterator[tuple[Tag, Packet]]: yield from self.paired @@ -59,10 +92,13 @@ def __init__( self.tag_keys = tag_keys self.packet_keys = packet_keys self.generator_factory = generator_factory + self.check_consistency = False def __iter__(self) -> Iterator[tuple[Tag, Packet]]: - yield from self.generator_factory() + if not self.check_consistency: + yield from self.generator_factory() + # TODO: add typespec handling def keys( self, *, trigger_run: bool = False ) -> tuple[Collection[str] | None, Collection[str] | None]: @@ -70,3 +106,98 @@ def keys( return super().keys(trigger_run=trigger_run) # If the keys are already set, return them return self.tag_keys.copy(), self.packet_keys.copy() + + +class PolarsStream(SyncStream): + def __init__( + self, + df: pl.DataFrame, + tag_keys: Collection[str], + packet_keys: Collection[str] | None = None, + ): + self.df = df + self.tag_keys = tuple(tag_keys) + self.packet_keys = tuple(packet_keys) if packet_keys is not None else None + + def __iter__(self) -> Iterator[tuple[Tag, Packet]]: + df = self.df + # if self.packet_keys is not None: + # df = df.select(self.tag_keys + self.packet_keys) + for row in df.iter_rows(named=True): + tag = {key: row[key] for key in self.tag_keys} + packet = { + key: val + for key, val in row.items() + if key not in self.tag_keys and not key.startswith("_source_info_") + } + # TODO: revisit and fix this rather hacky implementation + source_info = { + key.removeprefix("_source_info_"): val + for key, val in row.items() + if key.startswith("_source_info_") + } + yield tag, Packet(packet, source_info=source_info) + + +class EmptyStream(SyncStream): + def __init__( + self, + tag_keys: Collection[str] | None = None, + packet_keys: Collection[str] | None = None, + tag_typespec: TypeSpec | None = None, + packet_typespec: TypeSpec | None = None, + ): + if tag_keys is None and tag_typespec is not None: + tag_keys = tag_typespec.keys() + self.tag_keys = list(tag_keys) if tag_keys else [] + + if packet_keys is None and packet_typespec is not None: + packet_keys = packet_typespec.keys() + self.packet_keys = list(packet_keys) if packet_keys else [] + + self.tag_typespec = tag_typespec + self.packet_typespec = packet_typespec + + def keys( + self, *streams: SyncStream, trigger_run: bool = False + ) -> tuple[Collection[str] | None, Collection[str] | None]: + return self.tag_keys, self.packet_keys + + def types( + self, *streams: SyncStream, trigger_run: bool = False + ) -> tuple[TypeSpec | None, TypeSpec | None]: + return self.tag_typespec, self.packet_typespec + + def __iter__(self) -> Iterator[tuple[Tag, Packet]]: + # Empty stream, no data to yield + return iter([]) + + +class StreamWrapper(SyncStream): + """ + A wrapper for a SyncStream that allows the stream to be labeled and + associated with an invocation without modifying the original stream. 
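`PolarsStream` splits each DataFrame row into a tag (the declared tag columns) and a packet (all remaining columns), and treats columns prefixed with `_source_info_` as provenance attached to the packet rather than data. Sketch with an illustrative DataFrame:

    import polars as pl
    from orcapod.core.streams import PolarsStream

    df = pl.DataFrame(
        {
            "id": [0, 1],
            "x": [10, 11],
            "_source_info_x": ["run_a", "run_b"],  # becomes Packet source_info, not a data key
        }
    )

    stream = PolarsStream(df, tag_keys=["id"])
    # Yields ({"id": 0}, Packet({"x": 10}, source_info={"x": "run_a"})), then the id=1 row.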
+ """ + + def __init__(self, stream: SyncStream, **kwargs): + super().__init__(**kwargs) + self.stream = stream + + def keys( + self, *streams: SyncStream, **kwargs + ) -> tuple[Collection[str] | None, Collection[str] | None]: + return self.stream.keys(*streams, **kwargs) + + def types( + self, *streams: SyncStream, **kwargs + ) -> tuple[TypeSpec | None, TypeSpec | None]: + return self.stream.types(*streams, **kwargs) + + def computed_label(self) -> str | None: + return self.stream.label + + def __iter__(self) -> Iterator[tuple[Tag, Packet]]: + """ + Iterate over the stream, yielding tuples of (tags, packets). + """ + yield from self.stream diff --git a/src/orcapod/core/tracker.py b/src/orcapod/core/tracker.py index efc2c42..e0a2bd7 100644 --- a/src/orcapod/core/tracker.py +++ b/src/orcapod/core/tracker.py @@ -1,4 +1,5 @@ from orcapod.core.base import Invocation, Kernel, Tracker +from orcapod.core.sources import StreamSource class GraphTracker(Tracker): @@ -44,6 +45,7 @@ def generate_namemap(self) -> dict[Invocation, str]: def generate_graph(self): import networkx as nx + G = nx.DiGraph() # Add edges for each invocation @@ -51,15 +53,20 @@ def generate_graph(self): for invocation in invocations: for upstream in invocation.streams: # if upstream.invocation is not in the graph, add it - if upstream.invocation not in G: - G.add_node(upstream.invocation) - G.add_edge(upstream.invocation, invocation, stream=upstream) + upstream_invocation = upstream.invocation + if upstream_invocation is None: + # If upstream is None, create a stub kernel + upstream_invocation = Invocation(StreamSource(upstream), []) + if upstream_invocation not in G: + G.add_node(upstream_invocation) + G.add_edge(upstream_invocation, invocation, stream=upstream) return G def draw_graph(self): import networkx as nx import matplotlib.pyplot as plt + G = self.generate_graph() labels = self.generate_namemap() diff --git a/src/orcapod/data/__init__.py b/src/orcapod/data/__init__.py new file mode 100644 index 0000000..eb005c1 --- /dev/null +++ b/src/orcapod/data/__init__.py @@ -0,0 +1,7 @@ +from .trackers import DEFAULT_TRACKER_MANAGER +from .system_constants import orcapod_constants as constants + +__all__ = [ + "DEFAULT_TRACKER_MANAGER", + "constants", +] diff --git a/src/orcapod/data/base.py b/src/orcapod/data/base.py new file mode 100644 index 0000000..dec4f06 --- /dev/null +++ b/src/orcapod/data/base.py @@ -0,0 +1,117 @@ +from typing import Any +from orcapod.protocols import hashing_protocols as hp +from orcapod.hashing.defaults import get_default_object_hasher +import logging + + +logger = logging.getLogger(__name__) + + +class LabeledContentIdentifiableBase: + """ + Base class for content-identifiable objects. + This class provides a way to define objects that can be uniquely identified + based on their content rather than their identity in memory. Specifically, the identity of the + object is determined by the structure returned by the `identity_structure` method. + The hash of the object is computed based on the `identity_structure` using the provided `ObjectHasher`, + which defaults to the one returned by `get_default_object_hasher`. + Two content-identifiable objects are considered equal if their `identity_structure` returns the same value. + """ + + def __init__( + self, + identity_structure_hasher: hp.ObjectHasher | None = None, + label: str | None = None, + ) -> None: + """ + Initialize the ContentHashable with an optional ObjectHasher. 
+ + Args: + identity_structure_hasher (ObjectHasher | None): An instance of ObjectHasher to use for hashing. + """ + self.identity_structure_hasher = ( + identity_structure_hasher or get_default_object_hasher() + ) + self._label = label + + @property + def has_assigned_label(self) -> bool: + """ + Check if the label is explicitly set for this object. + + Returns: + bool: True if the label is explicitly set, False otherwise. + """ + return self._label is not None + + @property + def label(self) -> str: + """ + Get the label of this object. + + Returns: + str | None: The label of the object, or None if not set. + """ + return self._label or self.computed_label() or self.__class__.__name__ + + @label.setter + def label(self, label: str | None) -> None: + """ + Set the label of this object. + + Args: + label (str | None): The label to set for this object. + """ + self._label = label + + def computed_label(self) -> str | None: + """ + Compute a label for this object based on its content. If label is not explicitly set for this object + and computed_label returns a valid value, it will be used as label of this object. + """ + return None + + def identity_structure(self) -> Any: + """ + Return a structure that represents the identity of this object. + + Override this method in your subclass to provide a stable representation + of your object's content. The structure should contain all fields that + determine the object's identity. + + Returns: + Any: A structure representing this object's content, or None to use default hash + """ + # TODO: come up with a way to signify non-determinate identity structure + return None + + def __hash__(self) -> int: + """ + Hash implementation that uses the identity structure if provided, + otherwise falls back to the superclass's hash method. + + Returns: + int: A hash value based on either content or identity + """ + # Get the identity structure + structure = self.identity_structure() + if structure is None: + # If no identity structure is provided, use the default hash + return super().__hash__() + + return self.identity_structure_hasher.hash_to_int(structure) + + def __eq__(self, other: object) -> bool: + """ + Equality check that compares the identity structures of two objects. + + Args: + other (object): The object to compare against. + + Returns: + bool: True if both objects have the same identity structure, False otherwise. + """ + if not isinstance(other, LabeledContentIdentifiableBase): + return NotImplemented + + return self.identity_structure() == other.identity_structure() diff --git a/src/orcapod/data/context.py b/src/orcapod/data/context.py new file mode 100644 index 0000000..20bc43a --- /dev/null +++ b/src/orcapod/data/context.py @@ -0,0 +1,60 @@ +from orcapod.types.semantic_types import SemanticTypeRegistry +from orcapod.types import default_registry +from orcapod.protocols import hashing_protocols as hp +from orcapod.hashing.defaults import get_default_arrow_hasher, get_default_object_hasher +from dataclasses import dataclass + + +@dataclass +class DataContext: + context_key: str + semantic_type_registry: SemanticTypeRegistry + arrow_hasher: hp.ArrowHasher + object_hasher: hp.ObjectHasher + + @staticmethod + def resolve_data_context(data_context: "str | DataContext | None") -> "DataContext": + """ + Returns the default data context manager. + This is typically used when no specific context is provided. 
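Content identity means two instances compare equal (and hash alike) whenever their `identity_structure()` agrees, regardless of object identity or label. A minimal subclass sketch (the `Recipe` class is illustrative):

    from orcapod.data.base import LabeledContentIdentifiableBase

    class Recipe(LabeledContentIdentifiableBase):
        def __init__(self, name: str, ingredients: tuple[str, ...], **kwargs):
            super().__init__(**kwargs)
            self.name = name
            self.ingredients = ingredients

        def identity_structure(self):
            # Only content participates in identity; the label does not.
            return (self.__class__.__name__, self.name, self.ingredients)

    a = Recipe("soup", ("water", "salt"))
    b = Recipe("soup", ("water", "salt"), label="tonight")
    assert a == b and hash(a) == hash(b)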
+ """ + return orcapod_system_data_context_manager.resolve_context(data_context) + + +class DataContextManager(dict[str, DataContext]): + def register_context(self, data_context: DataContext): + """ + Register a new DataContext instance. + + Args: + data_context: The DataContext instance to register. + """ + if data_context.context_key in self: + raise ValueError( + f"DataContext with key {data_context.context_key} already exists." + ) + self[data_context.context_key] = data_context + + def resolve_context(self, context_info: str | DataContext | None) -> DataContext: + if isinstance(context_info, DataContext): + return context_info + if context_info is None: + return default_data_context + if isinstance(context_info, str): + if context_info in self: + return self[context_info] + else: + raise ValueError(f"DataContext with key {context_info} not found.") + + + +default_data_context = DataContext( + "std:v0.1.0:default", + default_registry, + get_default_arrow_hasher(), + get_default_object_hasher(), +) + + +orcapod_system_data_context_manager = DataContextManager() +orcapod_system_data_context_manager.register_context(default_data_context) diff --git a/src/orcapod/data/datagrams/__init__.py b/src/orcapod/data/datagrams/__init__.py new file mode 100644 index 0000000..0c255e3 --- /dev/null +++ b/src/orcapod/data/datagrams/__init__.py @@ -0,0 +1,13 @@ +from .arrow_datagram import ArrowDatagram +from .arrow_tag_packet import ArrowTag, ArrowPacket +from .dict_datagram import DictDatagram +from .dict_tag_packet import DictTag, DictPacket + +__all__ = [ + "ArrowDatagram", + "ArrowTag", + "ArrowPacket", + "DictDatagram", + "DictTag", + "DictPacket", +] diff --git a/src/orcapod/data/datagrams/arrow_datagram.py b/src/orcapod/data/datagrams/arrow_datagram.py new file mode 100644 index 0000000..c29cf58 --- /dev/null +++ b/src/orcapod/data/datagrams/arrow_datagram.py @@ -0,0 +1,845 @@ +import logging +from collections.abc import Collection, Iterator, Mapping +from typing import Self + +import pyarrow as pa + +from orcapod.data.context import ( + DataContext, +) +from orcapod.data.datagrams.base import BaseDatagram +from orcapod.data.system_constants import orcapod_constants as constants +from orcapod.types import schemas, typespec_utils +from orcapod.types.core import DataValue +from orcapod.types.semantic_converter import SemanticConverter +from orcapod.utils import arrow_utils + +logger = logging.getLogger(__name__) + + +class ArrowDatagram(BaseDatagram): + """ + Immutable datagram implementation using PyArrow Table as storage backend. + + This implementation provides high-performance columnar data operations while + maintaining the datagram interface. It efficiently handles type conversions, + semantic processing, and interoperability with Arrow-based tools. + + The underlying table is split into separate components: + - Data table: Primary business data columns + - Meta table: Internal system metadata with {orcapod.META_PREFIX} ('__') prefixes + - Context table: Data context information with {orcapod.CONTEXT_KEY} + + Future Packet subclass will also handle: + - Source info: Data provenance with {orcapod.SOURCE_PREFIX} ('_source_') prefixes + + When exposing to external tools, semantic types are encoded as + `_{semantic_type}_` prefixes (_path_config_file, _id_user_name). + + All operations return new instances, preserving immutability. + + Example: + >>> table = pa.Table.from_pydict({ + ... "user_id": [123], + ... "name": ["Alice"], + ... "__pipeline_version": ["v2.1.0"], + ... 
"{orcapod.CONTEXT_KEY}": ["financial_v1"] + ... }) + >>> datagram = ArrowDatagram(table) + >>> updated = datagram.update(name="Alice Smith") + """ + + def __init__( + self, + table: pa.Table, + meta_info: Mapping[str, DataValue] | None = None, + semantic_converter: SemanticConverter | None = None, + data_context: str | DataContext | None = None, + ) -> None: + """ + Initialize ArrowDatagram from PyArrow Table. + + Args: + table: PyArrow Table containing the data. Must have exactly one row. + semantic_converter: Optional converter for semantic type handling. + If None, will be created based on the data context and table schema. + data_context: Context key string or DataContext object. + If None and table contains context column, will extract from table. + + Raises: + ValueError: If table doesn't contain exactly one row. + + Note: + The input table is automatically split into data, meta, and context + components based on column naming conventions. + """ + # Validate table has exactly one row for datagram + if len(table) != 1: + raise ValueError( + "Table must contain exactly one row to be a valid datagram." + ) + + # Split table into data, meta, and context components + context_columns = ( + [constants.CONTEXT_KEY] + if constants.CONTEXT_KEY in table.column_names + else [] + ) + + # Extract context table if present + if constants.CONTEXT_KEY in table.column_names and data_context is None: + context_table = table.select([constants.CONTEXT_KEY]) + data_context = context_table[constants.CONTEXT_KEY].to_pylist()[0] + + # Initialize base class with data context + super().__init__(data_context) + + meta_columns = [ + col for col in table.column_names if col.startswith(constants.META_PREFIX) + ] + # Split table into components + self._data_table = table.drop_columns(context_columns + meta_columns) + self._meta_table = table.select(meta_columns) if meta_columns else None + + if len(self._data_table.column_names) == 0: + raise ValueError("Data table must contain at least one data column.") + + # Create semantic converter + if semantic_converter is None: + semantic_converter = SemanticConverter.from_semantic_schema( + schemas.SemanticSchema.from_arrow_schema( + self._data_table.schema, + self._data_context.semantic_type_registry, + ) + ) + self._semantic_converter = semantic_converter + + # process supplemented meta info if provided + if meta_info is not None: + # make sure it has the expected prefixes + meta_info = { + ( + f"{constants.META_PREFIX}{k}" + if not k.startswith(constants.META_PREFIX) + else k + ): v + for k, v in meta_info.items() + } + # Note that meta information cannot contain semantic types + typespec = typespec_utils.get_typespec_from_dict(meta_info) + new_meta_table = self._semantic_converter.from_python_to_arrow( + meta_info, typespec + ) + if self._meta_table is None: + self._meta_table = new_meta_table + else: + # drop any column that will be overwritten by the new meta table + keep_meta_columns = [ + c + for c in self._meta_table.column_names + if c not in new_meta_table.column_names + ] + self._meta_table = arrow_utils.hstack_tables( + self._meta_table.select(keep_meta_columns), new_meta_table + ) + + # Create data context table + data_context_schema = pa.schema({constants.CONTEXT_KEY: pa.large_string()}) + self._data_context_table = pa.Table.from_pylist( + [{constants.CONTEXT_KEY: self._data_context.context_key}], + schema=data_context_schema, + ) + + # Initialize caches + self._cached_python_schema: schemas.PythonSchema | None = None + self._cached_python_dict: dict[str, 
DataValue] | None = None + self._cached_meta_python_schema: schemas.PythonSchema | None = None + self._cached_content_hash: str | None = None + + # 1. Core Properties (Identity & Structure) + @property + def meta_columns(self) -> tuple[str, ...]: + """Return tuple of meta column names.""" + if self._meta_table is None: + return () + return tuple(self._meta_table.column_names) + + # 2. Dict-like Interface (Data Access) + def __getitem__(self, key: str) -> DataValue: + """Get data column value by key.""" + if key not in self._data_table.column_names: + raise KeyError(f"Data column '{key}' not found") + + return self._data_table[key].to_pylist()[0] + + def __contains__(self, key: str) -> bool: + """Check if data column exists.""" + return key in self._data_table.column_names + + def __iter__(self) -> Iterator[str]: + """Iterate over data column names.""" + return iter(self._data_table.column_names) + + def get(self, key: str, default: DataValue = None) -> DataValue: + """Get data column value with default.""" + if key in self._data_table.column_names: + return self.as_dict()[key] + return default + + # 3. Structural Information + def keys( + self, + include_all_info: bool = False, + include_meta_columns: bool | Collection[str] = False, + include_context: bool = False, + ) -> tuple[str, ...]: + """Return tuple of column names.""" + # Start with data columns + include_meta_columns = include_all_info or include_meta_columns + include_context = include_all_info or include_context + + result_keys = list(self._data_table.column_names) + + # Add context if requested + if include_context: + result_keys.append(constants.CONTEXT_KEY) + + # Add meta columns if requested + if include_meta_columns: + if include_meta_columns is True: + result_keys.extend(self.meta_columns) + elif isinstance(include_meta_columns, Collection): + # Filter meta columns by prefix matching + filtered_meta_cols = [ + col + for col in self.meta_columns + if any(col.startswith(prefix) for prefix in include_meta_columns) + ] + result_keys.extend(filtered_meta_cols) + + return tuple(result_keys) + + def types( + self, + include_all_info: bool = False, + include_meta_columns: bool | Collection[str] = False, + include_context: bool = False, + ) -> schemas.PythonSchema: + """ + Return Python schema for the datagram. + + Args: + include_meta_columns: Whether to include meta column types. 
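A sketch of the column bookkeeping: meta columns (the double-underscore prefix) and the context column are hidden from the default views and are opted back in per call (column names and values are illustrative):

    import pyarrow as pa
    from orcapod.data.datagrams import ArrowDatagram

    table = pa.Table.from_pydict(
        {
            "user_id": [123],
            "name": ["Alice"],
            "__pipeline_version": ["v2.1.0"],  # meta column
        }
    )
    dg = ArrowDatagram(table)

    dg.keys()                           # ("user_id", "name")
    dg.keys(include_meta_columns=True)  # also "__pipeline_version"
    dg.keys(include_all_info=True)      # data + context key + meta columns
    dg["name"]                          # "Alice"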
+ - True: include all meta column types + - Collection[str]: include meta column types matching these prefixes + - False: exclude meta column types + include_context: Whether to include context type + + Returns: + Python schema + """ + include_meta_columns = include_all_info or include_meta_columns + include_context = include_all_info or include_context + + # Get data schema (cached) + if self._cached_python_schema is None: + self._cached_python_schema = ( + self._semantic_converter.from_arrow_to_python_schema( + self._data_table.schema + ) + ) + + schema = dict(self._cached_python_schema) + + # Add context if requested + if include_context: + schema[constants.CONTEXT_KEY] = str + + # Add meta schema if requested + if include_meta_columns and self._meta_table is not None: + if self._cached_meta_python_schema is None: + self._cached_meta_python_schema = ( + self._semantic_converter.from_arrow_to_python_schema( + self._meta_table.schema + ) + ) + meta_schema = dict(self._cached_meta_python_schema) + if include_meta_columns is True: + schema.update(meta_schema) + elif isinstance(include_meta_columns, Collection): + filtered_meta_schema = { + k: v + for k, v in meta_schema.items() + if any(k.startswith(prefix) for prefix in include_meta_columns) + } + schema.update(filtered_meta_schema) + + return schemas.PythonSchema(schema) + + def arrow_schema( + self, + include_all_info: bool = False, + include_meta_columns: bool | Collection[str] = False, + include_context: bool = False, + ) -> pa.Schema: + """ + Return the PyArrow schema for this datagram. + + Args: + include_meta_columns: Whether to include meta columns in the schema. + - True: include all meta columns + - Collection[str]: include meta columns matching these prefixes + - False: exclude meta columns + include_context: Whether to include context column in the schema + + Returns: + PyArrow schema representing the datagram's structure + """ + # order matters + include_meta_columns = include_all_info or include_meta_columns + include_context = include_all_info or include_context + + all_schemas = [self._data_table.schema] + + # Add context schema if requested + if include_context: + # TODO: reassess the efficiency of this approach + all_schemas.append(self._data_context_table.schema) + + # Add meta schema if requested + if include_meta_columns and self._meta_table is not None: + if include_meta_columns is True: + meta_schema = self._meta_table.schema + elif isinstance(include_meta_columns, Collection): + # Filter meta schema by prefix matching + matched_fields = [ + field + for field in self._meta_table.schema + if any( + field.name.startswith(prefix) for prefix in include_meta_columns + ) + ] + if matched_fields: + meta_schema = pa.schema(matched_fields) + else: + meta_schema = None + else: + meta_schema = None + + if meta_schema is not None: + all_schemas.append(meta_schema) + + return arrow_utils.join_arrow_schemas(*all_schemas) + + def content_hash(self) -> str: + """ + Calculate and return content hash of the datagram. + Only includes data columns, not meta columns or context. + + Returns: + Hash string of the datagram content + """ + if self._cached_content_hash is None: + self._cached_content_hash = self._data_context.arrow_hasher.hash_table( + self._data_table, + prefix_hasher_id=True, + ) + return self._cached_content_hash + + # 4. 
Format Conversions (Export) + def as_dict( + self, + include_all_info: bool = False, + include_meta_columns: bool | Collection[str] = False, + include_context: bool = False, + ) -> dict[str, DataValue]: + """ + Return dictionary representation of the datagram. + + Args: + include_meta_columns: Whether to include meta columns. + - True: include all meta columns + - Collection[str]: include meta columns matching these prefixes + - False: exclude meta columns + include_context: Whether to include context key + + Returns: + Dictionary representation + """ + include_meta_columns = include_all_info or include_meta_columns + include_context = include_all_info or include_context + + # Get data dict (cached) + if self._cached_python_dict is None: + self._cached_python_dict = self._semantic_converter.from_arrow_to_python( + self._data_table + )[0] + + result_dict = dict(self._cached_python_dict) + + # Add context if requested + if include_context: + result_dict[constants.CONTEXT_KEY] = self._data_context.context_key + + # Add meta data if requested + if include_meta_columns and self._meta_table is not None: + if include_meta_columns is True: + meta_dict = self._meta_table.to_pylist()[0] + elif isinstance(include_meta_columns, Collection): + meta_dict = self._meta_table.to_pylist()[0] + # Include only meta columns matching prefixes + meta_dict = { + k: v + for k, v in meta_dict.items() + if any(k.startswith(prefix) for prefix in include_meta_columns) + } + if meta_dict is not None: + result_dict.update(meta_dict) + + return result_dict + + def as_table( + self, + include_all_info: bool = False, + include_meta_columns: bool | Collection[str] = False, + include_context: bool = False, + ) -> pa.Table: + """ + Convert the datagram to an Arrow table. + + Args: + include_meta_columns: Whether to include meta columns. + - True: include all meta columns + - Collection[str]: include meta columns matching these prefixes + - False: exclude meta columns + include_context: Whether to include the context column + + Returns: + Arrow table representation + """ + include_meta_columns = include_all_info or include_meta_columns + include_context = include_all_info or include_context + + all_tables = [self._data_table] + + # Add context if requested + if include_context: + all_tables.append(self._data_context_table) + + # Add meta columns if requested + if include_meta_columns and self._meta_table is not None: + meta_table = None + if include_meta_columns is True: + meta_table = self._meta_table + elif isinstance(include_meta_columns, Collection): + # Filter meta columns by prefix matching + # ensure all given prefixes start with the meta prefix + prefixes = ( + f"{constants.META_PREFIX}{prefix}" + if not prefix.startswith(constants.META_PREFIX) + else prefix + for prefix in include_meta_columns + ) + + matched_cols = [ + col + for col in self._meta_table.column_names + if any(col.startswith(prefix) for prefix in prefixes) + ] + if matched_cols: + meta_table = self._meta_table.select(matched_cols) + else: + meta_table = None + + if meta_table is not None: + all_tables.append(meta_table) + + return arrow_utils.hstack_tables(*all_tables) + + def as_arrow_compatible_dict( + self, + include_all_info: bool = False, + include_meta_columns: bool | Collection[str] = False, + include_context: bool = False, + ) -> dict[str, DataValue]: + """ + Return dictionary representation compatible with Arrow. + + Args: + include_meta_columns: Whether to include meta columns. 
+ - True: include all meta columns + - Collection[str]: include meta columns matching these prefixes + - False: exclude meta columns + include_context: Whether to include context key + + Returns: + Dictionary representation compatible with Arrow + """ + return self.as_table( + include_all_info=include_all_info, + include_meta_columns=include_meta_columns, + include_context=include_context, + ).to_pylist()[0] + + # 5. Meta Column Operations + def get_meta_value(self, key: str, default: DataValue = None) -> DataValue: + """ + Get a meta column value. + + Args: + key: Meta column key (with or without {orcapod.META_PREFIX} ('__') prefix) + default: Default value if not found + + Returns: + Meta column value + """ + if self._meta_table is None: + return default + + # Handle both prefixed and unprefixed keys + if not key.startswith(constants.META_PREFIX): + key = constants.META_PREFIX + key + + if key not in self._meta_table.column_names: + return default + + return self._meta_table[key].to_pylist()[0] + + def with_meta_columns(self, **meta_updates: DataValue) -> Self: + """ + Create a new ArrowDatagram with updated meta columns. + Maintains immutability by returning a new instance. + + Args: + **meta_updates: Meta column updates (keys will be prefixed with {orcapod.META_PREFIX} ('__') if needed) + + Returns: + New ArrowDatagram instance + """ + # Prefix the keys and prepare updates + prefixed_updates = {} + for k, v in meta_updates.items(): + if not k.startswith(constants.META_PREFIX): + k = constants.META_PREFIX + k + prefixed_updates[k] = v + + new_datagram = self.copy(include_cache=False) + + # Start with existing meta data + meta_dict = {} + if self._meta_table is not None: + meta_dict = self._meta_table.to_pylist()[0] + + # Apply updates + meta_dict.update(prefixed_updates) + + # TODO: properly handle case where meta data is None (it'll get inferred as NoneType) + + # Create new meta table + new_datagram._meta_table = ( + pa.Table.from_pylist([meta_dict]) if meta_dict else None + ) + return new_datagram + + def drop_meta_columns(self, *keys: str, ignore_missing: bool = False) -> Self: + """ + Create a new ArrowDatagram with specified meta columns dropped. + Maintains immutability by returning a new instance. + + Args: + *keys: Meta column keys to drop (with or without {orcapod.META_PREFIX} ('__') prefix) + + Returns: + New ArrowDatagram instance without specified meta columns + """ + if self._meta_table is None: + return self # No meta columns to drop + + # Normalize keys to have prefixes + prefixed_keys = set() + for key in keys: + if not key.startswith(constants.META_PREFIX): + key = constants.META_PREFIX + key + prefixed_keys.add(key) + + missing_keys = prefixed_keys - set(self._meta_table.column_names) + if missing_keys and not ignore_missing: + raise KeyError( + f"Following meta columns do not exist and cannot be dropped: {sorted(missing_keys)}" + ) + + new_datagram = self.copy(include_cache=False) + new_datagram._meta_table = self._meta_table.drop_columns(prefixed_keys) + + return new_datagram + + # 6. Data Column Operations + def select(self, *column_names: str) -> Self: + """ + Create a new ArrowDatagram with only specified data columns. + Maintains immutability by returning a new instance. 
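+
+        Example (illustrative sketch; assumes the datagram has data columns "x" and "y"):
+            >>> narrowed = datagram.select("x")
+            >>> narrowed.keys()
+            ('x',)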
+ + Args: + *column_names: Data column names to keep + + Returns: + New ArrowDatagram instance with only specified data columns + """ + # Validate columns exist + missing_cols = set(column_names) - set(self._data_table.column_names) + if missing_cols: + raise ValueError(f"Columns not found: {missing_cols}") + + new_datagram = self.copy(include_cache=False) + new_datagram._data_table = new_datagram._data_table.select(column_names) + + return new_datagram + + def drop(self, *column_names: str, ignore_missing: bool = False) -> Self: + """ + Create a new ArrowDatagram with specified data columns dropped. + Maintains immutability by returning a new instance. + + Args: + *column_names: Data column names to drop + + Returns: + New ArrowDatagram instance without specified data columns + """ + + # Filter out specified data columns + missing = set(column_names) - set(self._data_table.column_names) + if missing and not ignore_missing: + raise KeyError( + f"Following columns do not exist and cannot be dropped: {sorted(missing)}" + ) + column_names = tuple(c for c in column_names if self._data_table.columns) + + new_datagram = self.copy(include_cache=False) + new_datagram._data_table = self._data_table.drop_columns(column_names) + # TODO: consider dropping extra semantic columns if they are no longer needed + return new_datagram + + def rename(self, column_mapping: Mapping[str, str]) -> Self: + """ + Create a new ArrowDatagram with data columns renamed. + Maintains immutability by returning a new instance. + + Args: + column_mapping: Mapping from old column names to new column names + + Returns: + New ArrowDatagram instance with renamed data columns + """ + # Create new schema with renamed fields, preserving original types + + if not column_mapping: + return self + + new_names = [column_mapping.get(k, k) for k in self._data_table.column_names] + + new_datagram = self.copy(include_cache=False) + new_datagram._data_table = new_datagram._data_table.rename_columns(new_names) + + # apply the same rename to the converters + new_datagram._semantic_converter = self._semantic_converter.rename( + column_mapping + ) + + return new_datagram + + def update(self, **updates: DataValue) -> Self: + """ + Create a new ArrowDatagram with specific column values updated. + + Args: + **updates: Column names and their new values + + Returns: + New ArrowDatagram instance with updated values + + Raises: + KeyError: If any specified column doesn't exist + + Example: + # Convert relative path to absolute path + updated = datagram.update(file_path="/absolute/path/to/file.txt") + + # Update multiple values + updated = datagram.update(status="processed", file_path="/new/path") + """ + # Only update if there are columns to update + if not updates: + return self + + # Validate all columns exist + missing_cols = set(updates.keys()) - set(self._data_table.column_names) + if missing_cols: + raise KeyError( + f"Only existing columns can be updated. 
Following columns were not found: {sorted(missing_cols)}" + ) + + new_datagram = self.copy(include_cache=False) + + updates_typespec = schemas.PythonSchema( + {k: v for k, v in self.types().items() if k in updates} + ) + update_table = self._semantic_converter.from_python_to_arrow( + updates, updates_typespec + ) + new_datagram._data_table = arrow_utils.hstack_tables( + self._data_table.drop_columns(list(updates.keys())), update_table + ).select(self._data_table.column_names) # adjsut the order to match original + + return new_datagram + + def with_columns( + self, + column_types: Mapping[str, type] | None = None, + **updates: DataValue, + ) -> Self: + """ + Create a new ArrowDatagram with new data columns added. + Maintains immutability by returning a new instance. + + Args: + column_updates: New data columns as a mapping + column_types: Optional type specifications for new columns + **kwargs: New data columns as keyword arguments + + Returns: + New ArrowDatagram instance with new data columns added + + Raises: + ValueError: If any column already exists (use update() instead) + """ + # Combine explicit updates with kwargs + + if not updates: + return self + + # Error if any of the columns already exists + existing_overlaps = set(updates.keys()) & set(self._data_table.column_names) + if existing_overlaps: + raise ValueError( + f"Columns already exist: {sorted(existing_overlaps)}. " + f"Use update() to modify existing columns." + ) + + # create a copy and perform in-place updates + new_datagram = self.copy() + + # TODO: consider simplifying this conversion logic + # prepare update's table + typespec = typespec_utils.get_typespec_from_dict(updates, column_types) + + updates_converter = SemanticConverter.from_semantic_schema( + schemas.SemanticSchema.from_typespec( + typespec, self._data_context.semantic_type_registry + ) + ) + # TODO: cleanup the handling of typespec python schema and various conversion points + new_data_table = updates_converter.from_python_to_arrow(updates, typespec) + + # perform in-place update + new_datagram._data_table = arrow_utils.hstack_tables( + new_datagram._data_table, new_data_table + ) + + # prepare the joined converter + new_datagram._semantic_converter = self._semantic_converter.join( + updates_converter + ) + return new_datagram + + # 7. Context Operations + def with_context_key(self, new_context_key: str) -> Self: + """ + Create a new ArrowDatagram with a different data context key. + Maintains immutability by returning a new instance. + + Args: + new_context_key: New data context key string + + Returns: + New ArrowDatagram instance with new context + """ + # TODO: consider if there is a more efficient way to handle context + # Combine all tables for reconstruction + combined_table = self._data_table + if self._meta_table is not None: + combined_table = arrow_utils.hstack_tables(combined_table, self._meta_table) + + return self.__class__( + table=combined_table, + data_context=new_context_key, + # Note: semantic_converter will be rebuilt for new context + ) + + # 8. 
Utility Operations + def copy(self, include_cache: bool = True) -> Self: + """Return a copy of the datagram.""" + new_datagram = super().copy() + + new_datagram._data_table = self._data_table + new_datagram._meta_table = self._meta_table + new_datagram._data_context = self._data_context + new_datagram._semantic_converter = self._semantic_converter + + if include_cache: + new_datagram._cached_python_schema = self._cached_python_schema + new_datagram._cached_python_dict = self._cached_python_dict + new_datagram._cached_content_hash = self._cached_content_hash + new_datagram._cached_meta_python_schema = self._cached_meta_python_schema + else: + new_datagram._cached_python_schema = None + new_datagram._cached_python_dict = None + new_datagram._cached_content_hash = None + new_datagram._cached_meta_python_schema = None + + return new_datagram + + # 9. String Representations + def __str__(self) -> str: + """ + Return user-friendly string representation. + + Shows the datagram as a simple dictionary for user-facing output, + messages, and logging. Only includes data columns for clean output. + + Returns: + Dictionary-style string representation of data columns only. + + Example: + >>> str(datagram) + "{'user_id': 123, 'name': 'Alice'}" + >>> print(datagram) + {'user_id': 123, 'name': 'Alice'} + """ + return str(self.as_dict()) + + def __repr__(self) -> str: + """ + Return detailed string representation for debugging. + + Shows the datagram type and comprehensive information including + data columns, meta columns count, and context for debugging purposes. + + Returns: + Detailed representation with type and metadata information. + + Example: + >>> repr(datagram) + "ArrowDatagram(data={'user_id': 123, 'name': 'Alice'}, meta_columns=2, context='std:v1.0.0:abc123')" + """ + data_dict = self.as_dict() + meta_count = len(self.meta_columns) + context_key = self.data_context_key + + return ( + f"{self.__class__.__name__}(" + f"data={data_dict}, " + f"meta_columns={meta_count}, " + f"context='{context_key}'" + f")" + ) diff --git a/src/orcapod/data/datagrams/arrow_tag_packet.py b/src/orcapod/data/datagrams/arrow_tag_packet.py new file mode 100644 index 0000000..503b83e --- /dev/null +++ b/src/orcapod/data/datagrams/arrow_tag_packet.py @@ -0,0 +1,267 @@ +from hmac import new +import logging +from collections.abc import Collection, Mapping +from typing import Self + + +import pyarrow as pa + +from orcapod.data.system_constants import orcapod_constants as constants +from orcapod.data.context import ( + DataContext, +) +from orcapod.types import schemas +from orcapod.types.core import DataValue +from orcapod.types.semantic_converter import SemanticConverter +from orcapod.utils import arrow_utils + +from orcapod.data.datagrams.arrow_datagram import ArrowDatagram + +logger = logging.getLogger(__name__) + + +class ArrowTag(ArrowDatagram): + """ + A tag implementation using Arrow table backend. + + Represents a single-row Arrow table that can be converted to Python + dictionary representation while caching computed values for efficiency. + + Initialize with an Arrow table. + + Args: + table: Single-row Arrow table representing the tag + + Raises: + ValueError: If table doesn't contain exactly one row + """ + + def __init__( + self, + table: pa.Table, + semantic_converter: SemanticConverter | None = None, + data_context: str | DataContext | None = None, + ) -> None: + if len(table) != 1: + raise ValueError( + "ArrowTag should only contain a single row, " + "as it represents a single tag." 
+ ) + super().__init__( + table=table, + semantic_converter=semantic_converter, + data_context=data_context, + ) + + +class ArrowPacket(ArrowDatagram): + """ + Arrow table-based packet implementation with comprehensive features. + + A packet implementation that uses Arrow tables as the primary storage format, + providing efficient memory usage and columnar data operations while supporting + source information tracking and content hashing. + + + Initialize ArrowPacket with Arrow table and configuration. + + Args: + table: Single-row Arrow table representing the packet + source_info: Optional source information mapping + semantic_converter: Optional semantic converter + semantic_type_registry: Registry for semantic types + finger_print: Optional fingerprint for tracking + arrow_hasher: Optional Arrow hasher + post_hash_callback: Optional callback after hash calculation + skip_source_info_extraction: Whether to skip source info processing + + Raises: + ValueError: If table doesn't contain exactly one row + """ + + def __init__( + self, + table: pa.Table, + meta_info: Mapping[str, DataValue] | None = None, + source_info: Mapping[str, str | None] | None = None, + semantic_converter: SemanticConverter | None = None, + data_context: str | DataContext | None = None, + ) -> None: + if len(table) != 1: + raise ValueError( + "ArrowPacket should only contain a single row, " + "as it represents a single packet." + ) + if source_info is None: + source_info = {} + + # normalize the table to ensure it has the expected source_info columns + # TODO: use simpler function to ensure source_info columns + data_table, prefixed_tables = arrow_utils.prepare_prefixed_columns( + table, + {constants.SOURCE_PREFIX: source_info}, + exclude_columns=[constants.CONTEXT_KEY], + exclude_prefixes=[constants.META_PREFIX], + ) + + super().__init__( + data_table, + meta_info=meta_info, + semantic_converter=semantic_converter, + data_context=data_context, + ) + self._source_info_table = prefixed_tables[constants.SOURCE_PREFIX] + + self._cached_source_info: dict[str, str | None] | None = None + self._cached_python_schema: schemas.PythonSchema | None = None + self._cached_content_hash: str | None = None + + def keys( + self, + include_all_info: bool = False, + include_meta_columns: bool | Collection[str] = False, + include_context: bool = False, + include_source: bool = False, + ) -> tuple[str, ...]: + keys = super().keys( + include_all_info=include_all_info, + include_meta_columns=include_meta_columns, + include_context=include_context, + ) + if include_all_info or include_source: + keys += tuple(f"{constants.SOURCE_PREFIX}{k}" for k in self.keys()) + return keys + + def types( + self, + include_all_info: bool = False, + include_meta_columns: bool | Collection[str] = False, + include_context: bool = False, + include_source: bool = False, + ) -> schemas.PythonSchema: + """Return copy of the Python schema.""" + schema = super().types( + include_all_info=include_all_info, + include_meta_columns=include_meta_columns, + include_context=include_context, + ) + if include_all_info or include_source: + for key in self.keys(): + schema[f"{constants.SOURCE_PREFIX}{key}"] = str + return schema + + def arrow_schema( + self, + include_all_info: bool = False, + include_meta_columns: bool | Collection[str] = False, + include_context: bool = False, + include_source: bool = False, + ) -> pa.Schema: + """ + Return the PyArrow schema for this datagram. 
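+
+        Example (illustrative sketch; assumes a packet with data columns "x" and "y",
+        and the documented '_source_' prefix for source-info columns):
+            >>> packet.arrow_schema(include_source=True).names
+            ['x', 'y', '_source_x', '_source_y']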
+ + Args: + include_data_context: Whether to include data context column in the schema + include_source: Whether to include source info columns in the schema + + Returns: + PyArrow schema representing the datagram's structure + """ + schema = super().arrow_schema( + include_all_info=include_all_info, + include_meta_columns=include_meta_columns, + include_context=include_context, + ) + if include_all_info or include_source: + return arrow_utils.join_arrow_schemas( + schema, self._source_info_table.schema + ) + return schema + + def as_dict( + self, + include_all_info: bool = False, + include_meta_columns: bool | Collection[str] = False, + include_context: bool = False, + include_source: bool = False, + ) -> dict[str, DataValue]: + """ + Convert to dictionary representation. + + Args: + include_source: Whether to include source info fields + + Returns: + Dictionary representation of the packet + """ + return_dict = super().as_dict( + include_all_info=include_all_info, + include_meta_columns=include_meta_columns, + include_context=include_context, + ) + if include_all_info or include_source: + return_dict.update( + { + f"{constants.SOURCE_PREFIX}{k}": v + for k, v in self.source_info().items() + } + ) + return return_dict + + def as_table( + self, + include_all_info: bool = False, + include_meta_columns: bool | Collection[str] = False, + include_context: bool = False, + include_source: bool = False, + ) -> pa.Table: + table = super().as_table( + include_all_info=include_all_info, + include_meta_columns=include_meta_columns, + include_context=include_context, + ) + if include_all_info or include_source: + # add source_info only for existing data columns + table = arrow_utils.hstack_tables(table, self._source_info_table) + return table + + def as_datagram( + self, + include_all_info: bool = False, + include_meta_columns: bool | Collection[str] = False, + include_source: bool = False, + ) -> ArrowDatagram: + table = self.as_table( + include_all_info=include_all_info, + include_meta_columns=include_meta_columns, + include_source=include_source, + ) + return ArrowDatagram( + table, + semantic_converter=self._semantic_converter, + data_context=self._data_context, + ) + + def source_info(self) -> dict[str, str | None]: + """ + Return source information for all keys. + + Returns: + Copy of the dictionary mapping field names to their source info + """ + if self._cached_source_info is None: + self._cached_source_info = { + k.removeprefix(constants.SOURCE_PREFIX): v + for k, v in self._source_info_table.to_pylist()[0].items() + } + return self._cached_source_info.copy() + + # 8. Utility Operations + def copy(self, include_cache: bool = True) -> Self: + """Return a copy of the datagram.""" + new_packet = super().copy(include_cache=include_cache) + + if include_cache: + new_packet._cached_source_info = self._cached_source_info + + return new_packet diff --git a/src/orcapod/data/datagrams/base.py b/src/orcapod/data/datagrams/base.py new file mode 100644 index 0000000..9f6d4a8 --- /dev/null +++ b/src/orcapod/data/datagrams/base.py @@ -0,0 +1,276 @@ +""" +Data structures and utilities for working with datagrams in OrcaPod. + +This module provides classes and functions for handling packet-like data structures +that can represent data in various formats (Python dicts, Arrow tables, etc.) while +maintaining type information, source metadata, and semantic type conversion capability. + +Key classes: +- SemanticConverter: Converts between different data representations. Intended for internal use. 
+- DictDatagram: Immutable dict-based data structure +- PythonDictPacket: Python dict-based packet with source info +- ArrowPacket: Arrow table-based packet implementation +- PythonDictTag/ArrowTag: Tag implementations for data identification + +The module also provides utilities for schema validation, table operations, +and type conversions between semantic stores, Python stores, and Arrow tables. +""" + +import logging +from abc import ABC, abstractmethod +from collections.abc import Collection, Iterator, Mapping +from typing import Any, Self, TypeAlias + +import pyarrow as pa + +from orcapod.data.context import ( + DataContext, +) +from orcapod.types import TypeSpec +from orcapod.types.core import DataValue + +logger = logging.getLogger(__name__) + +# A conveniece packet-like type that defines a value that can be +# converted to a packet. It's broader than Packet and a simple mapping +# from string keys to DataValue (e.g., int, float, str) can be regarded +# as PacketLike, allowing for more flexible interfaces. +# Anything that requires Packet-like data but without the strict features +# of a Packet should accept PacketLike. +# One should be careful when using PacketLike as a return type as it does not +# enforce the typespec or source_info, which are important for packet integrity. +PacketLike: TypeAlias = Mapping[str, DataValue] + +PythonStore: TypeAlias = Mapping[str, DataValue] + + +class ImmutableDict(Mapping[str, DataValue]): + """ + An immutable dictionary-like container for DataValues. + + Provides a read-only view of a dictionary mapping strings to DataValues, + implementing the Mapping protocol for compatibility with dict-like operations. + + Initialize with data from a mapping. + Args: + data: Source mapping to copy data from + """ + + def __init__(self, data: Mapping[str, DataValue]): + self._data = dict(data) + + def __getitem__(self, key: str) -> DataValue: + return self._data[key] + + def __iter__(self): + return iter(self._data) + + def __len__(self) -> int: + return len(self._data) + + def __repr__(self) -> str: + return self._data.__repr__() + + def __str__(self) -> str: + return self._data.__str__() + + def __or__(self, other: Mapping[str, DataValue]) -> Self: + """ + Create a new ImmutableDict by merging with another mapping. + + Args: + other: Another mapping to merge with + + Returns: + A new ImmutableDict containing the combined data + """ + return self.__class__(self._data | dict(other)) + + +def contains_prefix_from(column: str, prefixes: Collection[str]) -> bool: + """ + Check if a column name matches any of the given prefixes. + + Args: + column: Column name to check + prefixes: Collection of prefixes to match against + + Returns: + True if the column starts with any of the prefixes, False otherwise + """ + for prefix in prefixes: + if column.startswith(prefix): + return True + return False + + +class BaseDatagram(ABC): + """ + Abstract base class for immutable datagram implementations. + + Provides shared functionality and enforces consistent interface across + different storage backends (dict, Arrow table, etc.). Concrete subclasses + must implement the abstract methods to handle their specific storage format. + + The base class only manages the data context key string - how that key + is interpreted and used is left to concrete implementations. + """ + + def __init__(self, data_context: DataContext | str | None = None) -> None: + """ + Initialize base datagram with data context. + + Args: + data_context: Context for semantic interpretation. 
Can be a string key + or a DataContext object, or None for default. + """ + self._data_context = DataContext.resolve_data_context(data_context) + + # 1. Core Properties (Identity & Structure) + @property + def data_context_key(self) -> str: + """Return the data context key.""" + return self._data_context.context_key + + @property + @abstractmethod + def meta_columns(self) -> tuple[str, ...]: + """Return tuple of meta column names.""" + ... + + # 2. Dict-like Interface (Data Access) + @abstractmethod + def __getitem__(self, key: str) -> DataValue: + """Get data column value by key.""" + ... + + @abstractmethod + def __contains__(self, key: str) -> bool: + """Check if data column exists.""" + ... + + @abstractmethod + def __iter__(self) -> Iterator[str]: + """Iterate over data column names.""" + ... + + @abstractmethod + def get(self, key: str, default: DataValue = None) -> DataValue: + """Get data column value with default.""" + ... + + # 3. Structural Information + @abstractmethod + def keys( + self, + include_all_info: bool = False, + include_meta_columns: bool | Collection[str] = False, + include_context: bool = False, + ) -> tuple[str, ...]: + """Return tuple of column names.""" + ... + + @abstractmethod + def types( + self, + include_all_info: bool = False, + include_meta_columns: bool | Collection[str] = False, + include_context: bool = False, + ) -> TypeSpec: + """Return type specification for the datagram.""" + ... + + @abstractmethod + def arrow_schema( + self, + include_all_info: bool = False, + include_meta_columns: bool | Collection[str] = False, + include_context: bool = False, + ) -> pa.Schema: + """Return the PyArrow schema for this datagram.""" + ... + + @abstractmethod + def content_hash(self) -> str: + """Calculate and return content hash of the datagram.""" + ... + + # 4. Format Conversions (Export) + @abstractmethod + def as_dict( + self, + include_all_info: bool = False, + include_meta_columns: bool | Collection[str] = False, + include_context: bool = False, + ) -> dict[str, DataValue]: + """Return dictionary representation of the datagram.""" + ... + + @abstractmethod + def as_table( + self, + include_all_info: bool = False, + include_meta_columns: bool | Collection[str] = False, + include_context: bool = False, + ) -> pa.Table: + """Convert the datagram to an Arrow table.""" + ... + + # 5. Meta Column Operations + @abstractmethod + def get_meta_value(self, key: str, default: DataValue = None) -> DataValue: + """Get a meta column value.""" + ... + + @abstractmethod + def with_meta_columns(self, **updates: DataValue) -> Self: + """Create new datagram with updated meta columns.""" + ... + + @abstractmethod + def drop_meta_columns(self, *keys: str) -> Self: + """Create new datagram with specified meta columns removed.""" + ... + + # 6. Data Column Operations + @abstractmethod + def select(self, *column_names: str) -> Self: + """Create new datagram with only specified data columns.""" + ... + + @abstractmethod + def drop(self, *column_names: str) -> Self: + """Create new datagram with specified data columns removed.""" + ... + + @abstractmethod + def rename(self, column_mapping: Mapping[str, str]) -> Self: + """Create new datagram with data columns renamed.""" + ... + + @abstractmethod + def update(self, **updates: DataValue) -> Self: + """Create new datagram with existing column values updated.""" + ... 
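+
+    # Illustrative contrast between update() and with_columns() (the column names
+    # here are assumptions; the behavior shown is what concrete subclasses such as
+    # DictDatagram and ArrowDatagram implement):
+    #   datagram.update(name="Alice Smith")      # modifies an existing column
+    #   datagram.with_columns(nickname="Ally")   # adds a brand-new column
+    #   datagram.update(nickname="Ally")         # raises KeyError: column not found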
+ + @abstractmethod + def with_columns( + self, + column_types: Mapping[str, type] | None = None, + **updates: DataValue, + ) -> Self: + """Create new datagram with additional data columns.""" + ... + + # 7. Context Operations + @abstractmethod + def with_context_key(self, new_context_key: str) -> Self: + """Create new datagram with different data context.""" + ... + + # 8. Utility Operations + def copy(self) -> Self: + """Create a shallow copy of the datagram.""" + new_datagram = object.__new__(self.__class__) + new_datagram._data_context = self._data_context + return new_datagram diff --git a/src/orcapod/data/datagrams/dict_datagram.py b/src/orcapod/data/datagrams/dict_datagram.py new file mode 100644 index 0000000..9f7088f --- /dev/null +++ b/src/orcapod/data/datagrams/dict_datagram.py @@ -0,0 +1,836 @@ +from curses import meta +import logging +from collections.abc import Collection, Iterator, Mapping +from typing import Self, cast + +import pyarrow as pa + +from orcapod.data.system_constants import orcapod_constants as constants +from orcapod.data.context import ( + DataContext, +) +from orcapod.data.datagrams.base import BaseDatagram +from orcapod.types import TypeSpec, schemas +from orcapod.types import typespec_utils as tsutils +from orcapod.types.core import DataValue +from orcapod.types.semantic_converter import SemanticConverter +from orcapod.utils import arrow_utils + +logger = logging.getLogger(__name__) + + +class DictDatagram(BaseDatagram): + """ + Immutable datagram implementation using dictionary as storage backend. + + This implementation uses composition (not inheritance from Mapping) to maintain + control over the interface while leveraging dictionary efficiency for data access. + Provides clean separation between data, meta, and context components. + + The underlying data is split into separate components: + - Data dict: Primary business data columns + - Meta dict: Internal system metadata with {orcapod.META_PREFIX} ('__') prefixes + - Context: Data context information with {orcapod.CONTEXT_KEY} + + Future Packet subclass will also handle: + - Source info: Data provenance with {orcapod.SOURCE_PREFIX} ('_source_') prefixes + + When exposing to external tools, semantic types are encoded as + `_{semantic_type}_` prefixes (_path_config_file, _id_user_name). + + All operations return new instances, preserving immutability. + + Example: + >>> data = {{ + ... "user_id": 123, + ... "name": "Alice", + ... "__pipeline_version": "v2.1.0", + ... "{orcapod.CONTEXT_KEY}": "financial_v1" + ... }} + >>> datagram = DictDatagram(data) + >>> updated = datagram.update(name="Alice Smith") + """ + + def __init__( + self, + data: Mapping[str, DataValue], + typespec: TypeSpec | None = None, + meta_info: Mapping[str, DataValue] | None = None, + semantic_converter: SemanticConverter | None = None, + data_context: str | DataContext | None = None, + ) -> None: + """ + Initialize DictDatagram from dictionary data. + + Args: + data: Source data mapping containing all column data. + typespec: Optional type specification for fields. + semantic_converter: Optional converter for semantic type handling. + If None, will be created based on data context and inferred types. + data_context: Data context for semantic type resolution. + If None and data contains context column, will extract from data. + + Note: + The input data is automatically split into data, meta, and context + components based on column naming conventions. 
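+
+        Example (illustrative sketch; column names and values are assumptions):
+            >>> datagram = DictDatagram({
+            ...     "user_id": 123,
+            ...     "name": "Alice",
+            ...     "__pipeline_version": "v2.1.0",
+            ... })
+            >>> sorted(datagram.keys())
+            ['name', 'user_id']
+            >>> datagram.meta_columns
+            ('__pipeline_version',)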
+ """ + # Parse through data and extract different column types + data_columns = {} + meta_columns = {} + extracted_context = None + + for k, v in data.items(): + if k == constants.CONTEXT_KEY: + # Extract data context but keep it separate from meta data + if data_context is None: + extracted_context = v + # Don't store context in meta_data - it's managed separately + elif k.startswith(constants.META_PREFIX): + # Double underscore = meta metadata + meta_columns[k] = v + else: + # Everything else = user data (including _source_ and semantic types) + data_columns[k] = v + + # Initialize base class with data context + final_context = data_context or cast(str, extracted_context) + super().__init__(final_context) + + # Store data and meta components separately (immutable) + self._data = dict(data_columns) + if meta_info is not None: + meta_columns.update(meta_info) + self._meta_data = meta_columns + + # Combine provided typespec info with inferred typespec from content + # If the column value is None and no type spec is provided, defaults to str. + self._data_python_schema = schemas.PythonSchema( + tsutils.get_typespec_from_dict( + self._data, + typespec, + ) + ) + + # Create semantic converter + if semantic_converter is None: + semantic_converter = SemanticConverter.from_semantic_schema( + self._data_python_schema.to_semantic_schema( + semantic_type_registry=self._data_context.semantic_type_registry + ), + ) + self._semantic_converter = semantic_converter + + # Create schema for meta data + self._meta_python_schema = schemas.PythonSchema( + tsutils.get_typespec_from_dict( + self._meta_data, + typespec=typespec, + ) + ) + + # Initialize caches + self._cached_data_table: pa.Table | None = None + self._cached_meta_table: pa.Table | None = None + self._cached_content_hash: str | None = None + self._cached_data_arrow_schema: pa.Schema | None = None + self._cached_meta_arrow_schema: pa.Schema | None = None + + # 1. Core Properties (Identity & Structure) + @property + def meta_columns(self) -> tuple[str, ...]: + """Return tuple of meta column names.""" + return tuple(self._meta_data.keys()) + + # 2. Dict-like Interface (Data Access) + def __getitem__(self, key: str) -> DataValue: + """Get data column value by key.""" + if key not in self._data: + raise KeyError(f"Data column '{key}' not found") + return self._data[key] + + def __contains__(self, key: str) -> bool: + """Check if data column exists.""" + return key in self._data + + def __iter__(self) -> Iterator[str]: + """Iterate over data column names.""" + return iter(self._data) + + def get(self, key: str, default: DataValue = None) -> DataValue: + """Get data column value with default.""" + return self._data.get(key, default) + + # 3. 
Structural Information + def keys( + self, + include_all_info: bool = False, + include_meta_columns: bool | Collection[str] = False, + include_context: bool = False, + ) -> tuple[str, ...]: + """Return tuple of column names.""" + include_meta_columns = include_all_info or include_meta_columns + include_context = include_all_info or include_context + # Start with data columns + result_keys = list(self._data.keys()) + + # Add context if requested + if include_context: + result_keys.append(constants.CONTEXT_KEY) + + # Add meta columns if requested + if include_meta_columns: + if include_meta_columns is True: + result_keys.extend(self.meta_columns) + elif isinstance(include_meta_columns, Collection): + # Filter meta columns by prefix matching + filtered_meta_cols = [ + col + for col in self.meta_columns + if any(col.startswith(prefix) for prefix in include_meta_columns) + ] + result_keys.extend(filtered_meta_cols) + + return tuple(result_keys) + + def types( + self, + include_all_info: bool = False, + include_meta_columns: bool | Collection[str] = False, + include_context: bool = False, + ) -> schemas.PythonSchema: + """ + Return Python schema for the datagram. + + Args: + include_meta_columns: Whether to include meta column types. + - True: include all meta column types + - Collection[str]: include meta column types matching these prefixes + - False: exclude meta column types + include_context: Whether to include context type + + Returns: + Python schema + """ + include_meta_columns = include_all_info or include_meta_columns + include_context = include_all_info or include_context + + # Start with data schema + schema = dict(self._data_python_schema) + + # Add context if requested + if include_context: + schema[constants.CONTEXT_KEY] = str + + # Add meta schema if requested + if include_meta_columns and self._meta_data: + if include_meta_columns is True: + schema.update(self._meta_python_schema) + elif isinstance(include_meta_columns, Collection): + filtered_meta_schema = { + k: v + for k, v in self._meta_python_schema.items() + if any(k.startswith(prefix) for prefix in include_meta_columns) + } + schema.update(filtered_meta_schema) + + return schemas.PythonSchema(schema) + + def arrow_schema( + self, + include_all_info: bool = False, + include_meta_columns: bool | Collection[str] = False, + include_context: bool = False, + ) -> pa.Schema: + """ + Return the PyArrow schema for this datagram. + + Args: + include_meta_columns: Whether to include meta columns in the schema. 
+ - True: include all meta columns + - Collection[str]: include meta columns matching these prefixes + - False: exclude meta columns + include_context: Whether to include context column in the schema + + Returns: + PyArrow schema representing the datagram's structure + """ + include_meta_columns = include_all_info or include_meta_columns + include_context = include_all_info or include_context + + # Build data schema (cached) + if self._cached_data_arrow_schema is None: + self._cached_data_arrow_schema = ( + self._semantic_converter.from_python_to_arrow_schema( + self._data_python_schema + ) + ) + + all_schemas = [self._cached_data_arrow_schema] + + # Add context schema if requested + if include_context: + context_schema = pa.schema([pa.field(constants.CONTEXT_KEY, pa.string())]) + all_schemas.append(context_schema) + + # Add meta schema if requested + if include_meta_columns and self._meta_data: + if self._cached_meta_arrow_schema is None: + self._cached_meta_arrow_schema = ( + self._semantic_converter.from_python_to_arrow_schema( + self._meta_python_schema + ) + ) + + assert self._cached_meta_arrow_schema is not None, ( + "Meta Arrow schema should be initialized by now" + ) + + if include_meta_columns is True: + meta_schema = self._cached_meta_arrow_schema + elif isinstance(include_meta_columns, Collection): + # Filter meta schema by prefix matching + matched_fields = [ + field + for field in self._cached_meta_arrow_schema + if any( + field.name.startswith(prefix) for prefix in include_meta_columns + ) + ] + if matched_fields: + meta_schema = pa.schema(matched_fields) + else: + meta_schema = None + else: + meta_schema = None + + if meta_schema is not None: + all_schemas.append(meta_schema) + + return arrow_utils.join_arrow_schemas(*all_schemas) + + def content_hash(self) -> str: + """ + Calculate and return content hash of the datagram. + Only includes data columns, not meta columns or context. + + Returns: + Hash string of the datagram content + """ + if self._cached_content_hash is None: + self._cached_content_hash = self._data_context.arrow_hasher.hash_table( + self.as_table(include_meta_columns=False, include_context=False), + prefix_hasher_id=True, + ) + return self._cached_content_hash + + # 4. Format Conversions (Export) + def as_dict( + self, + include_all_info: bool = False, + include_meta_columns: bool | Collection[str] = False, + include_context: bool = False, + ) -> dict[str, DataValue]: + """ + Return dictionary representation of the datagram. + + Args: + include_meta_columns: Whether to include meta columns. 
+ - True: include all meta columns + - Collection[str]: include meta columns matching these prefixes + - False: exclude meta columns + include_context: Whether to include context key + + Returns: + Dictionary representation + """ + include_context = include_all_info or include_context + include_meta_columns = include_all_info or include_meta_columns + + result_dict = dict(self._data) # Start with user data + + # Add context if requested + if include_context: + result_dict[constants.CONTEXT_KEY] = self._data_context.context_key + + # Add meta columns if requested + if include_meta_columns and self._meta_data: + if include_meta_columns is True: + # Include all meta columns + result_dict.update(self._meta_data) + elif isinstance(include_meta_columns, Collection): + # Include only meta columns matching prefixes + filtered_meta_data = { + k: v + for k, v in self._meta_data.items() + if any(k.startswith(prefix) for prefix in include_meta_columns) + } + result_dict.update(filtered_meta_data) + + return result_dict + + def _get_meta_arrow_table(self) -> pa.Table: + if self._cached_meta_table is None: + arrow_schema = self._get_meta_arrow_schema() + self._cached_meta_table = pa.Table.from_pylist( + [self._meta_data], + schema=arrow_schema, + ) + assert self._cached_meta_table is not None, ( + "Meta Arrow table should be initialized by now" + ) + return self._cached_meta_table + + def _get_meta_arrow_schema(self) -> pa.Schema: + if self._cached_meta_arrow_schema is None: + self._cached_meta_arrow_schema = ( + self._semantic_converter.from_python_to_arrow_schema( + self._meta_python_schema + ) + ) + assert self._cached_meta_arrow_schema is not None, ( + "Meta Arrow schema should be initialized by now" + ) + return self._cached_meta_arrow_schema + + def as_table( + self, + include_all_info: bool = False, + include_meta_columns: bool | Collection[str] = False, + include_context: bool = False, + ) -> pa.Table: + """ + Convert the datagram to an Arrow table. + + Args: + include_meta_columns: Whether to include meta columns. 
+ - True: include all meta columns + - Collection[str]: include meta columns matching these prefixes + - False: exclude meta columns + include_context: Whether to include the context column + + Returns: + Arrow table representation + """ + include_context = include_all_info or include_context + include_meta_columns = include_all_info or include_meta_columns + + # Build data table (cached) + if self._cached_data_table is None: + self._cached_data_table = self._semantic_converter.from_python_to_arrow( + self._data, + self._data_python_schema, + ) + assert self._cached_data_table is not None, ( + "Data Arrow table should be initialized by now" + ) + result_table = self._cached_data_table + + # Add context if requested + if include_context: + result_table = result_table.append_column( + constants.CONTEXT_KEY, + pa.array([self._data_context.context_key], type=pa.large_string()), + ) + + # Add meta columns if requested + meta_table = None + if include_meta_columns and self._meta_data: + meta_table = self._get_meta_arrow_table() + # Select appropriate meta columns + if isinstance(include_meta_columns, Collection): + # Filter meta columns by prefix matching + matched_cols = [ + col + for col in self._meta_data.keys() + if any(col.startswith(prefix) for prefix in include_meta_columns) + ] + if matched_cols: + meta_table = meta_table.select(matched_cols) + else: + meta_table = None + + # Combine tables if we have meta columns to add + if meta_table is not None: + result_table = arrow_utils.hstack_tables(result_table, meta_table) + + return result_table + + # 5. Meta Column Operations + def get_meta_value(self, key: str, default: DataValue = None) -> DataValue: + """ + Get meta column value with optional default. + + Args: + key: Meta column key (with or without {orcapod.META_PREFIX} ('__') prefix). + default: Value to return if meta column doesn't exist. + + Returns: + Meta column value if exists, otherwise the default value. + """ + # Handle both prefixed and unprefixed keys + if not key.startswith(constants.META_PREFIX): + key = constants.META_PREFIX + key + + return self._meta_data.get(key, default) + + def with_meta_columns(self, **meta_updates: DataValue) -> "DictDatagram": + """ + Create a new DictDatagram with updated meta columns. + Maintains immutability by returning a new instance. + + Args: + **meta_updates: Meta column updates (keys will be prefixed with {orcapod.META_PREFIX} ('__') if needed) + + Returns: + New DictDatagram instance + """ + # Prefix the keys and prepare updates + prefixed_updates = {} + for k, v in meta_updates.items(): + if not k.startswith(constants.META_PREFIX): + k = constants.META_PREFIX + k + prefixed_updates[k] = v + + # Start with existing meta data + new_meta_data = dict(self._meta_data) + new_meta_data.update(prefixed_updates) + + # Reconstruct full data dict for new instance + full_data = dict(self._data) # User data + full_data.update(new_meta_data) # Meta data + + return DictDatagram( + data=full_data, + semantic_converter=self._semantic_converter, + data_context=self._data_context, + ) + + def drop_meta_columns( + self, *keys: str, ignore_missing: bool = False + ) -> "DictDatagram": + """ + Create a new DictDatagram with specified meta columns dropped. + Maintains immutability by returning a new instance. + + Args: + *keys: Meta column keys to drop (with or without {orcapod.META_PREFIX} ('__') prefix) + ignore_missing: If True, ignore missing meta columns without raising an error. 
+ + Raises: + KeyError: If any specified meta column to drop doesn't exist and ignore_missing=False. + + Returns: + New DictDatagram instance without specified meta columns + """ + # Normalize keys to have prefixes + prefixed_keys = set() + for key in keys: + if not key.startswith(constants.META_PREFIX): + key = constants.META_PREFIX + key + prefixed_keys.add(key) + + missing_keys = prefixed_keys - set(self._meta_data.keys()) + if missing_keys and not ignore_missing: + raise KeyError( + f"Following meta columns do not exist and cannot be dropped: {sorted(missing_keys)}" + ) + + # Filter out specified meta columns + new_meta_data = { + k: v for k, v in self._meta_data.items() if k not in prefixed_keys + } + + # Reconstruct full data dict for new instance + full_data = dict(self._data) # User data + full_data.update(new_meta_data) # Filtered meta data + + return DictDatagram( + data=full_data, + semantic_converter=self._semantic_converter, + data_context=self._data_context, + ) + + # 6. Data Column Operations + def select(self, *column_names: str) -> "DictDatagram": + """ + Create a new DictDatagram with only specified data columns. + Maintains immutability by returning a new instance. + + Args: + *column_names: Data column names to keep + + Returns: + New DictDatagram instance with only specified data columns + """ + # Validate columns exist + missing_cols = set(column_names) - set(self._data.keys()) + if missing_cols: + raise KeyError(f"Columns not found: {missing_cols}") + + # Keep only specified data columns + new_data = {k: v for k, v in self._data.items() if k in column_names} + + # Reconstruct full data dict for new instance + full_data = new_data # Selected user data + full_data.update(self._meta_data) # Keep existing meta data + + return DictDatagram( + data=full_data, + semantic_converter=self._semantic_converter, + data_context=self._data_context, + ) + + def drop(self, *column_names: str, ignore_missing: bool = False) -> "DictDatagram": + """ + Create a new DictDatagram with specified data columns dropped. + Maintains immutability by returning a new instance. + + Args: + *column_names: Data column names to drop + + Returns: + New DictDatagram instance without specified data columns + """ + # Filter out specified data columns + missing = set(column_names) - set(self._data.keys()) + if missing and not ignore_missing: + raise KeyError( + f"Following columns do not exist and cannot be dropped: {sorted(missing)}" + ) + + new_data = {k: v for k, v in self._data.items() if k not in column_names} + + if not new_data: + raise ValueError("Cannot drop all data columns") + + # Reconstruct full data dict for new instance + full_data = new_data # Filtered user data + full_data.update(self._meta_data) # Keep existing meta data + + return DictDatagram( + data=full_data, + semantic_converter=self._semantic_converter, + data_context=self._data_context, + ) + + def rename(self, column_mapping: Mapping[str, str]) -> "DictDatagram": + """ + Create a new DictDatagram with data columns renamed. + Maintains immutability by returning a new instance. 
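+
+        Example (illustrative sketch; assumes data columns "x" and "y"):
+            >>> renamed = datagram.rename({"x": "width"})
+            >>> sorted(renamed.keys())
+            ['width', 'y']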
+ + Args: + column_mapping: Mapping from old column names to new column names + + Returns: + New DictDatagram instance with renamed data columns + """ + # Rename data columns according to mapping, preserving original types + new_data = {} + for old_name, value in self._data.items(): + new_name = column_mapping.get(old_name, old_name) + new_data[new_name] = value + + # Handle typespec updates for renamed columns + new_typespec = None + if self._data_python_schema: + existing_typespec = dict(self._data_python_schema) + + # Rename types according to column mapping + renamed_typespec = {} + for old_name, old_type in existing_typespec.items(): + new_name = column_mapping.get(old_name, old_name) + renamed_typespec[new_name] = old_type + + new_typespec = renamed_typespec + + # Reconstruct full data dict for new instance + full_data = new_data # Renamed user data + full_data.update(self._meta_data) # Keep existing meta data + + return DictDatagram( + data=full_data, + typespec=new_typespec, + semantic_converter=self._semantic_converter, + data_context=self._data_context, + ) + + def update(self, **updates: DataValue) -> "DictDatagram": + """ + Create a new DictDatagram with existing column values updated. + Maintains immutability by returning a new instance. + + Args: + **updates: Column names and their new values (columns must exist) + + Returns: + New DictDatagram instance with updated values + + Raises: + KeyError: If any column doesn't exist (use with_columns() to add new columns) + """ + if not updates: + return self + + # Error if any column doesn't exist + missing_columns = set(updates.keys()) - set(self._data.keys()) + if missing_columns: + raise KeyError( + f"Columns not found: {sorted(missing_columns)}. " + f"Use with_columns() to add new columns." + ) + + # Update existing columns + new_data = dict(self._data) + new_data.update(updates) + + # Reconstruct full data dict for new instance + full_data = new_data # Updated user data + full_data.update(self._meta_data) # Keep existing meta data + + return DictDatagram( + data=full_data, + semantic_converter=self._semantic_converter, # Keep existing converter + data_context=self._data_context, + ) + + def with_columns( + self, + column_types: Mapping[str, type] | None = None, + **updates: DataValue, + ) -> "DictDatagram": + """ + Create a new DictDatagram with new data columns added. + Maintains immutability by returning a new instance. + + Args: + column_updates: New data columns as a mapping + column_types: Optional type specifications for new columns + **kwargs: New data columns as keyword arguments + + Returns: + New DictDatagram instance with new data columns added + + Raises: + ValueError: If any column already exists (use update() instead) + """ + # Combine explicit updates with kwargs + + if not updates: + return self + + # Error if any column already exists + existing_overlaps = set(updates.keys()) & set(self._data.keys()) + if existing_overlaps: + raise ValueError( + f"Columns already exist: {sorted(existing_overlaps)}. " + f"Use update() to modify existing columns." 
+ ) + + # Update user data with new columns + new_data = dict(self._data) + new_data.update(updates) + + # Create updated typespec - handle None values by defaulting to str + typespec = self.types() + if column_types is not None: + typespec.update(column_types) + + new_typespec = tsutils.get_typespec_from_dict( + new_data, + typespec=typespec, + ) + + # Reconstruct full data dict for new instance + full_data = new_data # Updated user data + full_data.update(self._meta_data) # Keep existing meta data + + return DictDatagram( + data=full_data, + typespec=new_typespec, + # semantic converter needs to be rebuilt for new columns + data_context=self._data_context, + ) + + # 7. Context Operations + def with_context_key(self, new_context_key: str) -> "DictDatagram": + """ + Create a new DictDatagram with a different data context key. + Maintains immutability by returning a new instance. + + Args: + new_context_key: New data context key string + + Returns: + New DictDatagram instance with new context + """ + # Reconstruct full data dict for new instance + full_data = dict(self._data) # User data + full_data.update(self._meta_data) # Meta data + + return DictDatagram( + data=full_data, + data_context=new_context_key, # New context + # Note: semantic_converter will be rebuilt for new context + ) + + # 8. Utility Operations + def copy(self, include_cache: bool = True) -> Self: + """ + Create a shallow copy of the datagram. + + Returns a new datagram instance with the same data and cached values. + This is more efficient than reconstructing from scratch when you need + an identical datagram instance. + + Returns: + New DictDatagram instance with copied data and caches. + """ + new_datagram = super().copy() + new_datagram._data = self._data.copy() + new_datagram._meta_data = self._meta_data.copy() + new_datagram._data_python_schema = self._data_python_schema.copy() + new_datagram._semantic_converter = self._semantic_converter + new_datagram._meta_python_schema = self._meta_python_schema.copy() + + if include_cache: + new_datagram._cached_data_table = self._cached_data_table + new_datagram._cached_meta_table = self._cached_meta_table + new_datagram._cached_content_hash = self._cached_content_hash + new_datagram._cached_data_arrow_schema = self._cached_data_arrow_schema + new_datagram._cached_meta_arrow_schema = self._cached_meta_arrow_schema + + return new_datagram + + # 9. String Representations + def __str__(self) -> str: + """ + Return user-friendly string representation. + + Shows the datagram as a simple dictionary for user-facing output, + messages, and logging. Only includes data columns for clean output. + + Returns: + Dictionary-style string representation of data columns only. + """ + return str(self._data) + + def __repr__(self) -> str: + """ + Return detailed string representation for debugging. + + Shows the datagram type and comprehensive information including + data columns, meta columns count, and context for debugging purposes. + + Returns: + Detailed representation with type and metadata information. 
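+
+        Example (illustrative values):
+            >>> repr(datagram)
+            "DictDatagram(data={'user_id': 123, 'name': 'Alice'}, meta_columns=1, context='std:v1.0.0:abc123')"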
+ """ + meta_count = len(self.meta_columns) + context_key = self.data_context_key + + return ( + f"{self.__class__.__name__}(" + f"data={self._data}, " + f"meta_columns={meta_count}, " + f"context='{context_key}'" + f")" + ) diff --git a/src/orcapod/data/datagrams/dict_tag_packet.py b/src/orcapod/data/datagrams/dict_tag_packet.py new file mode 100644 index 0000000..ea9b7fa --- /dev/null +++ b/src/orcapod/data/datagrams/dict_tag_packet.py @@ -0,0 +1,260 @@ +import logging +from collections.abc import Collection, Mapping +from typing import Self +from xml.etree.ElementInclude import include + +import pyarrow as pa + +from orcapod.data.system_constants import orcapod_constants as constants +from orcapod.data.context import DataContext +from orcapod.data.datagrams.dict_datagram import DictDatagram +from orcapod.types import TypeSpec, schemas +from orcapod.types.core import DataValue +from orcapod.types.semantic_converter import SemanticConverter +from orcapod.utils import arrow_utils + +logger = logging.getLogger(__name__) + + +class DictTag(DictDatagram): + """ + A simple tag implementation using Python dictionary. + + Represents a tag (metadata) as a dictionary that can be converted + to different representations like Arrow tables. + """ + + +class DictPacket(DictDatagram): + """ + Enhanced packet implementation with source information support. + + Extends DictDatagram to include source information tracking and + enhanced table conversion capabilities that can include or exclude + source metadata. + + Initialize packet with data and optional source information. + + Args: + data: Primary data content + source_info: Optional mapping of field names to source information + typespec: Optional type specification + semantic_converter: Optional semantic converter + semantic_type_registry: Registry for semantic types. Defaults to system default registry. + arrow_hasher: Optional Arrow hasher. Defaults to system default arrow hasher. 
+ """ + + def __init__( + self, + data: Mapping[str, DataValue], + meta_info: Mapping[str, DataValue] | None = None, + source_info: Mapping[str, str | None] | None = None, + typespec: TypeSpec | None = None, + semantic_converter: SemanticConverter | None = None, + data_context: str | DataContext | None = None, + ) -> None: + # normalize the data content and remove any source info keys + data_only = { + k: v for k, v in data.items() if not k.startswith(constants.SOURCE_PREFIX) + } + contained_source_info = { + k.removeprefix(constants.SOURCE_PREFIX): v + for k, v in data.items() + if k.startswith(constants.SOURCE_PREFIX) + } + + super().__init__( + data_only, + typespec=typespec, + meta_info=meta_info, + semantic_converter=semantic_converter, + data_context=data_context, + ) + + self._source_info = {**contained_source_info, **(source_info or {})} + self._cached_source_info_table: pa.Table | None = None + self._cached_source_info_schema: pa.Schema | None = None + + @property + def _source_info_schema(self) -> pa.Schema: + if self._cached_source_info_schema is None: + self._cached_source_info_schema = pa.schema( + { + f"{constants.SOURCE_PREFIX}{k}": pa.large_string() + for k in self.keys() + } + ) + return self._cached_source_info_schema + + def as_table( + self, + include_all_info: bool = False, + include_meta_columns: bool | Collection[str] = False, + include_context: bool = False, + include_source: bool = False, + ) -> pa.Table: + """Convert the packet to an Arrow table.""" + table = super().as_table( + include_all_info=include_all_info, + include_meta_columns=include_meta_columns, + include_context=include_context, + ) + if include_all_info or include_source: + if self._cached_source_info_table is None: + source_info_data = { + f"{constants.SOURCE_PREFIX}{k}": v + for k, v in self.source_info().items() + } + self._cached_source_info_table = pa.Table.from_pylist( + [source_info_data], schema=self._source_info_schema + ) + assert self._cached_source_info_table is not None, ( + "Cached source info table should not be None" + ) + # subselect the corresponding _source_info as the columns present in the data table + source_info_table = self._cached_source_info_table.select( + [ + f"{constants.SOURCE_PREFIX}{k}" + for k in table.column_names + if k in self.keys() + ] + ) + table = arrow_utils.hstack_tables(table, source_info_table) + return table + + def as_dict( + self, + include_all_info: bool = False, + include_meta_columns: bool | Collection[str] = False, + include_context: bool = False, + include_source: bool = False, + ) -> dict[str, DataValue]: + """ + Return dictionary representation. 
+ + Args: + include_source: Whether to include source info fields + + Returns: + Dictionary representation of the packet + """ + dict_copy = super().as_dict( + include_all_info=include_all_info, + include_meta_columns=include_meta_columns, + include_context=include_context, + ) + if include_all_info or include_source: + for key, value in self.source_info().items(): + dict_copy[f"{constants.SOURCE_PREFIX}{key}"] = value + return dict_copy + + def keys( + self, + include_all_info: bool = False, + include_meta_columns: bool | Collection[str] = False, + include_context: bool = False, + include_source: bool = False, + ) -> tuple[str, ...]: + """Return keys of the Python schema.""" + keys = super().keys( + include_all_info=include_all_info, + include_meta_columns=include_meta_columns, + include_context=include_context, + ) + if include_all_info or include_source: + keys += tuple(f"{constants.SOURCE_PREFIX}{key}" for key in super().keys()) + return keys + + def types( + self, + include_all_info: bool = False, + include_meta_columns: bool | Collection[str] = False, + include_context: bool = False, + include_source: bool = False, + ) -> schemas.PythonSchema: + """Return copy of the Python schema.""" + schema = super().types( + include_all_info=include_all_info, + include_meta_columns=include_meta_columns, + include_context=include_context, + ) + if include_all_info or include_source: + for key in self.keys(): + schema[f"{constants.SOURCE_PREFIX}{key}"] = str + return schema + + def arrow_schema( + self, + include_all_info: bool = False, + include_meta_columns: bool | Collection[str] = False, + include_context: bool = False, + include_source: bool = False, + ) -> pa.Schema: + """ + Return the PyArrow schema for this datagram. + + Args: + include_data_context: Whether to include data context column in the schema + include_source: Whether to include source info columns in the schema + + Returns: + PyArrow schema representing the datagram's structure + """ + schema = super().arrow_schema( + include_all_info=include_all_info, + include_meta_columns=include_meta_columns, + include_context=include_context, + ) + if include_all_info or include_source: + return arrow_utils.join_arrow_schemas(schema, self._source_info_schema) + return schema + + def as_datagram( + self, + include_all_info: bool = False, + include_meta_columns: bool | Collection[str] = False, + include_source: bool = False, + ) -> DictDatagram: + """ + Convert the packet to a DictDatagram. + + Args: + include_source: Whether to include source info fields + + Returns: + DictDatagram representation of the packet + """ + + data = self.as_dict( + include_all_info=include_all_info, + include_meta_columns=include_meta_columns, + include_source=include_source, + ) + typespec = self.types( + include_all_info=include_all_info, + include_meta_columns=include_meta_columns, + include_source=include_source, + ) + return DictDatagram( + data, + typespec=typespec, + semantic_converter=self._semantic_converter, + data_context=self._data_context, + ) + + def source_info(self) -> dict[str, str | None]: + """ + Return source information for all keys. 
+ + Returns: + Dictionary mapping field names to their source info + """ + return {key: self._source_info.get(key, None) for key in self.keys()} + + def copy(self, include_cache: bool = True) -> Self: + """Return a shallow copy of the packet.""" + instance = super().copy(include_cache=include_cache) + instance._source_info = self._source_info.copy() + if include_cache: + instance._cached_source_info_table = self._cached_source_info_table + return instance diff --git a/src/orcapod/data/kernels.py b/src/orcapod/data/kernels.py new file mode 100644 index 0000000..1cda423 --- /dev/null +++ b/src/orcapod/data/kernels.py @@ -0,0 +1,187 @@ +from abc import ABC, abstractmethod +from collections.abc import Collection +from typing import Any +from orcapod.protocols import data_protocols as dp +import logging +from orcapod.data.streams import KernelStream +from orcapod.data.base import LabeledContentIdentifiableBase +from orcapod.data.context import DataContext +from orcapod.data.trackers import DEFAULT_TRACKER_MANAGER +from orcapod.types import TypeSpec + +logger = logging.getLogger(__name__) + + +class TrackedKernelBase(ABC, LabeledContentIdentifiableBase): + """ + Kernel defines the fundamental unit of computation that can be performed on zero, one or more streams of data. + It is the base class for all computations and transformations that can be performed on a collection of streams + (including an empty collection). + A kernel is defined as a callable that takes a (possibly empty) collection of streams as the input + and returns a new stream as output (note that output stream is always singular). + Each "invocation" of the kernel on a collection of streams is assigned a unique ID. + The corresponding invocation information is stored as Invocation object and attached to the output stream + for computational graph tracking. + """ + + def __init__( + self, + label: str | None = None, + data_context: str | DataContext | None = None, + skip_tracking: bool = False, + tracker_manager: dp.TrackerManager | None = None, + **kwargs, + ) -> None: + super().__init__(**kwargs) + self._label = label + + self._data_context = DataContext.resolve_data_context(data_context) + + self._skip_tracking = skip_tracking + self._tracker_manager = tracker_manager or DEFAULT_TRACKER_MANAGER + + @property + def data_context(self) -> DataContext: + return self._data_context + + @property + def data_context_key(self) -> str: + """Return the data context key.""" + return self._data_context.context_key + + @property + @abstractmethod + def kernel_id(self) -> tuple[str, ...]: ... + + def pre_kernel_processing(self, *streams: dp.Stream) -> tuple[dp.Stream, ...]: + """ + Pre-processing step that can be overridden by subclasses to perform any necessary pre-processing + on the input streams before the main computation. This is useful if you need to modify the input streams + or perform any other operations before the main computation. Critically, any Kernel/Pod invocations in the + pre-processing step will be tracked outside of the computation in the kernel. + Default implementation is a no-op, returning the input streams unchanged. + """ + return streams + + @abstractmethod + def validate_inputs(self, *streams: dp.Stream) -> None: ... + + def prepare_output_stream( + self, *streams: dp.Stream, label: str | None = None + ) -> dp.LiveStream: + """ + Prepare the output stream for the kernel invocation. + This method is called after the main computation is performed. + It creates a KernelStream with the provided streams and label. 
+ """ + return KernelStream(source=self, upstreams=streams, label=label) + + def track_invocation(self, *streams: dp.Stream, label: str | None = None) -> None: + """ + Track the invocation of the kernel with the provided streams. + This is a convenience method that calls record_kernel_invocation. + """ + if not self._skip_tracking and self._tracker_manager is not None: + self._tracker_manager.record_kernel_invocation(self, streams, label=label) + + def __call__( + self, *streams: dp.Stream, label: str | None = None, **kwargs + ) -> dp.LiveStream: + processed_streams = self.pre_kernel_processing(*streams) + self.validate_inputs(*processed_streams) + output_stream = self.prepare_output_stream(*processed_streams, label=label) + self.track_invocation(*processed_streams, label=label) + return output_stream + + @abstractmethod + def forward(self, *streams: dp.Stream) -> dp.Stream: + """ + Trigger the main computation of the kernel on a collection of streams. + This method is called when the kernel is invoked with a collection of streams. + Subclasses should override this method to provide the kernel with its unique behavior + """ + + def output_types(self, *streams: dp.Stream) -> tuple[TypeSpec, TypeSpec]: + processed_streams = self.pre_kernel_processing(*streams) + self.validate_inputs(*processed_streams) + return self.kernel_output_types(*processed_streams) + + @abstractmethod + def kernel_output_types(self, *streams: dp.Stream) -> tuple[TypeSpec, TypeSpec]: ... + + def __repr__(self): + return self.__class__.__name__ + + def __str__(self): + if self._label is not None: + return f"{self.__class__.__name__}({self._label})" + return self.__class__.__name__ + + @abstractmethod + def kernel_identity_structure( + self, streams: Collection[dp.Stream] | None = None + ) -> Any: ... + + def identity_structure(self, streams: Collection[dp.Stream] | None = None) -> Any: + # Default implementation of identity_structure for the kernel only + # concerns the kernel class and the streams if present. Subclasses of + # Kernels should override this method to provide a more meaningful + # representation of the kernel. Note that kernel must provide the notion + # of identity under possibly two distinct contexts: + # 1) identity of the kernel in itself when invoked without any stream + # 2) identity of the specific invocation of the kernel with a collection of streams + # While the latter technically corresponds to the identity of the invocation and not + # the kernel, only kernel can provide meaningful information as to the uniqueness of + # the invocation as only kernel would know if / how the input stream(s) alter the identity + # of the invocation. For example, if the kernel corresponds to an commutative computation + # and therefore kernel K(x, y) == K(y, x), then the identity structure must reflect the + # equivalence of the two by returning the same identity structure for both invocations. + # This can be achieved, for example, by returning a set over the streams instead of a tuple. + if streams is not None: + streams = self.pre_kernel_processing(*streams) + return self.kernel_identity_structure(streams) + + +class WrappedKernel(TrackedKernelBase): + """ + A wrapper for a kernel that allows it to be used as a stream source. + This is useful for cases where you want to use a kernel as a source of data + in a pipeline or other data processing context. 
+ """ + + def __init__(self, kernel: dp.Kernel, **kwargs) -> None: + # TODO: handle fixed input stream already set on the kernel + super().__init__(**kwargs) + self.kernel = kernel + + @property + def kernel_id(self) -> tuple[str, ...]: + return self.kernel.kernel_id + + def computed_label(self) -> str | None: + """ + Compute a label for this kernel based on its content. + If label is not explicitly set for this kernel and computed_label returns a valid value, + it will be used as label of this kernel. + """ + return self.kernel.label + + def forward(self, *streams: dp.Stream) -> dp.Stream: + return self.kernel.forward(*streams) + + def kernel_output_types(self, *streams: dp.Stream) -> tuple[TypeSpec, TypeSpec]: + return self.kernel.output_types(*streams) + + def validate_inputs(self, *streams: dp.Stream) -> None: + pass + + def __repr__(self): + return f"WrappedKernel({self.kernel!r})" + + def __str__(self): + return f"WrappedKernel:{self.kernel!s}" + + def kernel_identity_structure( + self, streams: Collection[dp.Stream] | None = None + ) -> Any: + return self.kernel.identity_structure(streams) diff --git a/src/orcapod/data/operators.py b/src/orcapod/data/operators.py new file mode 100644 index 0000000..f10bb2e --- /dev/null +++ b/src/orcapod/data/operators.py @@ -0,0 +1,320 @@ +from orcapod.data.kernels import TrackedKernelBase +from orcapod.protocols import data_protocols as dp +from orcapod.data.streams import ImmutableTableStream +from orcapod.types import TypeSpec +from orcapod.types.typespec_utils import union_typespecs, intersection_typespecs +from abc import abstractmethod +from typing import Any +from orcapod.errors import InputValidationError + + +class Operator(TrackedKernelBase): + """ + Base class for all operators. + Operators are a special type of kernel that can be used to perform operations on streams. + + They are defined as a callable that takes a (possibly empty) collection of streams as the input + and returns a new stream as output (note that output stream is always singular). + """ + + +class NonZeroInputOperator(Operator): + """ + Operators that work with at least one input stream. + This is useful for operators that can take a variable number of (but at least one ) input streams, + such as joins, unions, etc. + """ + + def validate_inputs(self, *streams: dp.Stream) -> None: + self.verify_non_zero_input(*streams) + return self.op_validate_inputs(*streams) + + @abstractmethod + def op_validate_inputs(self, *streams: dp.Stream) -> None: + """ + This method should be implemented by subclasses to validate the inputs to the operator. + It takes two streams as input and raises an error if the inputs are not valid. + """ + ... + + def verify_non_zero_input( + self, + *streams: dp.Stream, + ) -> None: + """ + Check that the inputs to the variable inputs operator are valid. + This method is called before the forward method to ensure that the inputs are valid. + """ + if len(streams) == 0: + raise ValueError( + f"Operator {self.__class__.__name__} requires at least one input stream." + ) + + def forward(self, *streams: dp.Stream) -> dp.Stream: + """ + Forward method for variable inputs operators. + It expects at least one stream as input. + """ + return self.op_forward(*streams) + + def output_types(self, *streams: dp.Stream) -> tuple[TypeSpec, TypeSpec]: + self.validate_inputs(*streams) + return self.op_output_types(*streams) + + def identity_structure(self, *streams: dp.Stream) -> Any: + """ + Return a structure that represents the identity of this operator. 
+ This is used to ensure that the operator can be uniquely identified in the computational graph. + """ + if len(streams) > 0: + self.verify_non_zero_input(*streams) + return self.op_identity_structure(*streams) + + @abstractmethod + def op_forward(self, *streams: dp.Stream) -> dp.Stream: + """ + This method should be implemented by subclasses to define the specific behavior of the non-zero input operator. + It takes variable number of streams as input and returns a new stream as output. + """ + ... + + @abstractmethod + def op_output_types(self, *streams: dp.Stream) -> tuple[TypeSpec, TypeSpec]: + """ + This method should be implemented by subclasses to return the typespecs of the input and output streams. + It takes at least one stream as input and returns a tuple of typespecs. + """ + ... + + @abstractmethod + def op_identity_structure(self, *streams: dp.Stream) -> Any: + """ + This method should be implemented by subclasses to return a structure that represents the identity of the operator. + It takes zero or more streams as input and returns a tuple containing the operator name and a set of streams. + If zero, it should return identity of the operator itself. + If one or more, it should return a identity structure approrpiate for the operator invoked on the given streams. + """ + ... + + +class BinaryOperator(Operator): + """ + Base class for all operators. + """ + + def validate_inputs(self, *streams: dp.Stream) -> None: + self.check_binary_inputs(*streams) + left_stream, right_stream = streams + return self.op_validate_inputs(left_stream, right_stream) + + @abstractmethod + def op_validate_inputs( + self, left_stream: dp.Stream, right_stream: dp.Stream + ) -> None: + """ + This method should be implemented by subclasses to validate the inputs to the operator. + It takes two streams as input and raises an error if the inputs are not valid. + """ + ... + + def check_binary_inputs( + self, *streams: dp.Stream, allow_zero: bool = False + ) -> None: + """ + Check that the inputs to the binary operator are valid. + This method is called before the forward method to ensure that the inputs are valid. + """ + if not (allow_zero and len(streams) == 0) and len(streams) != 2: + raise ValueError("BinaryOperator requires exactly two input streams.") + + def forward(self, *streams: dp.Stream) -> dp.Stream: + """ + Forward method for binary operators. + It expects exactly two streams as input. + """ + self.check_binary_inputs(*streams) + left_stream, right_stream = streams + return self.op_forward(left_stream, right_stream) + + def output_types(self, *streams: dp.Stream) -> tuple[TypeSpec, TypeSpec]: + self.check_binary_inputs(*streams) + left_stream, right_stream = streams + return self.op_output_types(left_stream, right_stream) + + def identity_structure(self, *streams: dp.Stream) -> Any: + """ + Return a structure that represents the identity of this operator. + This is used to ensure that the operator can be uniquely identified in the computational graph. + """ + self.check_binary_inputs(*streams, allow_zero=True) + return self.op_identity_structure(*streams) + + @abstractmethod + def op_forward(self, left_stream: dp.Stream, right_stream: dp.Stream) -> dp.Stream: + """ + This method should be implemented by subclasses to define the specific behavior of the binary operator. + It takes two streams as input and returns a new stream as output. + """ + ... 
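+
+    # Usage sketch (illustrative): a fully concrete BinaryOperator subclass (one that
+    # also provides the remaining TrackedKernelBase abstract methods such as kernel_id
+    # and kernel_output_types) is invoked by calling the instance on exactly two streams:
+    #
+    #     joined_stream = my_binary_op(left_stream, right_stream)
+    #
+    # validate_inputs() runs eagerly in __call__ and enforces the two-stream arity via
+    # check_binary_inputs(); forward() is invoked lazily when the returned KernelStream
+    # is first iterated or refreshed.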
+ + @abstractmethod + def op_output_types( + self, left_stream: dp.Stream, right_stream: dp.Stream + ) -> tuple[TypeSpec, TypeSpec]: + """ + This method should be implemented by subclasses to return the typespecs of the input and output streams. + It takes two streams as input and returns a tuple of typespecs. + """ + ... + + @abstractmethod + def op_identity_structure(self, *streams: dp.Stream) -> Any: + """ + This method should be implemented by subclasses to return a structure that represents the identity of the operator. + It takes two streams as input and returns a tuple containing the operator name and a set of streams. + """ + ... + + +class BinaryJoin(BinaryOperator): + def op_identity_structure(self, *streams: dp.Stream) -> Any: + # Join does not depend on the order of the streams -- convert it onto a set + id_struct = (self.__class__.__name__,) + if len(streams) == 2: + id_struct += (set(streams),) + return id_struct + + def op_forward( + self, left_stream: dp.Stream, right_stream: dp.Stream + ) -> ImmutableTableStream: + """ + Joins two streams together based on their tags. + The resulting stream will contain all the tags from both streams. + """ + + left_tag_typespec, left_packet_typespec = left_stream.types() + right_tag_typespec, right_packet_typespec = right_stream.types() + + common_tag_keys = tuple( + intersection_typespecs(left_tag_typespec, right_tag_typespec).keys() + ) + joined_tag_keys = tuple( + union_typespecs(left_tag_typespec, right_tag_typespec).keys() + ) + + # performing a check to ensure that packets are compatible + union_typespecs(left_packet_typespec, right_packet_typespec) + + joined_table = left_stream.as_table().join( + right_stream.as_table(), + keys=common_tag_keys, + join_type="inner", + ) + + return ImmutableTableStream( + joined_table, + tag_columns=tuple(joined_tag_keys), + source=self, + upstreams=(left_stream, right_stream), + ) + + def op_output_types(self, left_stream, right_stream) -> tuple[TypeSpec, TypeSpec]: + left_tag_typespec, left_packet_typespec = left_stream.types() + right_tag_typespec, right_packet_typespec = right_stream.types() + joined_tag_typespec = union_typespecs(left_tag_typespec, right_tag_typespec) + joined_packet_typespec = union_typespecs( + left_packet_typespec, right_packet_typespec + ) + return joined_tag_typespec, joined_packet_typespec + + def op_validate_inputs( + self, left_stream: dp.Stream, right_stream: dp.Stream + ) -> None: + try: + self.op_output_types(left_stream, right_stream) + except Exception as e: + raise InputValidationError(f"Input streams are not compatible: {e}") + + def __repr__(self) -> str: + return "Join()" + + +class Join(NonZeroInputOperator): + @property + def kernel_id(self) -> tuple[str, ...]: + """ + Returns a unique identifier for the kernel. + This is used to identify the kernel in the computational graph. + """ + return (f"{self.__class__.__name__}",) + + def op_identity_structure(self, *streams: dp.Stream) -> Any: + # Join does not depend on the order of the streams -- convert it onto a set + id_struct = (self.__class__.__name__,) + if len(streams) > 0: + id_struct += (set(streams),) + return id_struct + + def op_forward(self, *streams: dp.Stream) -> ImmutableTableStream: + """ + Joins two streams together based on their tags. + The resulting stream will contain all the tags from both streams. 
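+        Streams are joined pairwise, matching rows on the tag keys they share.
+
+        Illustrative example: a stream tagged by {"subject"} joined with a stream
+        tagged by {"subject", "session"} yields a stream tagged by
+        {"subject", "session"}, keeping only rows whose shared "subject" values match.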
+ """ + + all_tag_typespecs = [] + all_packet_typespecs = [] + + joined_stream = streams[0] + for stream in streams[1:]: + joined_tag_typespec, joined_packet_typespec = joined_stream.types() + stream_tag_typespec, stream_packet_typespec = stream.types() + joined_table = joined_stream.as_table().join( + stream.as_table(), + keys=intersection_typespecs(joined_tag_typespec, stream_tag_typespec), + join_type="inner", + ) + + for stream in streams: + tag_typespec, packet_typespec = stream.types() + all_tag_typespecs.append(tag_typespec) + all_packet_typespecs.append(packet_typespec) + + common_tag_keys = tuple(intersection_typespecs(*all_tag_typespecs).keys()) + joined_tag_keys = tuple(union_typespecs(*all_tag_typespecs).keys()) + + # performing a check to ensure that packets are compatible + union_typespecs(*all_packet_typespecs) + + joined_table = left_stream.as_table().join( + right_stream.as_table(), + keys=common_tag_keys, + join_type="inner", + ) + + return ImmutableTableStream( + joined_table, + tag_columns=tuple(joined_tag_keys), + source=self, + upstreams=streams, + ) + + def op_output_types(self, *streams: dp.Stream) -> tuple[TypeSpec, TypeSpec]: + left_stream, right_stream = streams + left_tag_typespec, left_packet_typespec = left_stream.types() + right_tag_typespec, right_packet_typespec = right_stream.types() + joined_tag_typespec = union_typespecs(left_tag_typespec, right_tag_typespec) + joined_packet_typespec = union_typespecs( + left_packet_typespec, right_packet_typespec + ) + return joined_tag_typespec, joined_packet_typespec + + def op_validate_inputs( + self, left_stream: dp.Stream, right_stream: dp.Stream + ) -> None: + try: + self.op_output_types(left_stream, right_stream) + except Exception as e: + raise InputValidationError(f"Input streams are not compatible: {e}") + + def __repr__(self) -> str: + return "Join()" diff --git a/src/orcapod/data/pods.py b/src/orcapod/data/pods.py new file mode 100644 index 0000000..a66cfc6 --- /dev/null +++ b/src/orcapod/data/pods.py @@ -0,0 +1,509 @@ +from datetime import datetime, timezone +import logging +import sys +from abc import abstractmethod +from collections.abc import Callable, Collection, Iterable, Sequence +from typing import Any, Literal, cast, TYPE_CHECKING + +from orcapod.data.datagrams import ( + DictPacket, + ArrowPacket, +) +from orcapod.data.context import DataContext +from orcapod.data.kernels import KernelStream, TrackedKernelBase +from orcapod.data.operators import Join +from orcapod.data.streams import LazyPodResultStream, PodStream +from orcapod.hashing.hash_utils import get_function_signature +from orcapod.protocols import data_protocols as dp +from orcapod.protocols import hashing_protocols as hp +from orcapod.protocols.store_protocols import ArrowDataStore +from orcapod.types import TypeSpec +from orcapod.types.schemas import PythonSchema +from orcapod.types.semantic_converter import SemanticConverter +from orcapod.types import typespec_utils as tsutils +from orcapod.utils import arrow_utils +from orcapod.data.system_constants import orcapod_constants as constants +from orcapod.utils.lazy_module import LazyModule + +if TYPE_CHECKING: + import pyarrow as pa +else: + pa = LazyModule("pyarrow") + +logger = logging.getLogger(__name__) + +error_handling_options = Literal["raise", "ignore", "warn"] + + +class ActivatablePodBase(TrackedKernelBase): + """ + FunctionPod is a specialized kernel that encapsulates a function to be executed on data streams. 
+ It allows for the execution of a function with a specific label and can be tracked by the system. + """ + + @abstractmethod + def input_packet_types(self) -> TypeSpec: + """ + Return the input typespec for the pod. This is used to validate the input streams. + """ + ... + + @abstractmethod + def output_packet_types(self) -> TypeSpec: + """ + Return the output typespec for the pod. This is used to validate the output streams. + """ + ... + + def __init__( + self, + error_handling: error_handling_options = "raise", + label: str | None = None, + **kwargs, + ) -> None: + super().__init__(label=label, **kwargs) + self._active = True + self.error_handling = error_handling + + def kernel_output_types(self, *streams: dp.Stream) -> tuple[TypeSpec, TypeSpec]: + """ + Return the input and output typespecs for the pod. + This is used to validate the input and output streams. + """ + tag_typespec, _ = streams[0].types() + return tag_typespec, self.output_packet_types() + + def is_active(self) -> bool: + """ + Check if the pod is active. If not, it will not process any packets. + """ + return self._active + + def set_active(self, active: bool) -> None: + """ + Set the active state of the pod. If set to False, the pod will not process any packets. + """ + self._active = active + + @staticmethod + def _join_streams(*streams: dp.Stream) -> dp.Stream: + if not streams: + raise ValueError("No streams provided for joining") + # Join the streams using a suitable join strategy + if len(streams) == 1: + return streams[0] + + joined_stream = streams[0] + for next_stream in streams[1:]: + joined_stream = Join()(joined_stream, next_stream) + return joined_stream + + def pre_kernel_processing(self, *streams: dp.Stream) -> tuple[dp.Stream, ...]: + """ + Prepare the incoming streams for execution in the pod. At least one stream must be present. + If more than one stream is present, the join of the provided streams will be returned. + """ + # if multiple streams are provided, join them + # otherwise, return as is + if len(streams) <= 1: + return streams + + output_stream = self._join_streams(*streams) + return (output_stream,) + + def validate_inputs(self, *streams: dp.Stream) -> None: + if len(streams) != 1: + raise ValueError( + f"{self.__class__.__name__} expects exactly one input stream, got {len(streams)}" + ) + input_stream = streams[0] + _, incoming_packet_types = input_stream.types() + if not tsutils.check_typespec_compatibility( + incoming_packet_types, self.input_packet_types() + ): + # TODO: use custom exception type for better error handling + raise ValueError( + f"Input typespec {incoming_packet_types} is not compatible with expected input typespec {self.input_packet_types}" + ) + + def prepare_output_stream( + self, *streams: dp.Stream, label: str | None = None + ) -> KernelStream: + return KernelStream(source=self, upstreams=streams, label=label) + + def forward(self, *streams: dp.Stream) -> dp.Stream: + assert len(streams) == 1, "PodBase.forward expects exactly one input stream" + return LazyPodResultStream(pod=self, prepared_stream=streams[0]) + + @abstractmethod + def call( + self, tag: dp.Tag, packet: dp.Packet + ) -> tuple[dp.Tag, dp.Packet | None]: ... 
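+
+    # Usage sketch (illustrative): call() maps one (tag, packet) element to one output
+    # element. Returning (tag, None) means the input produced no output (for example,
+    # when the pod is inactive) and the element is simply dropped downstream:
+    #
+    #     tag, output_packet = pod.call(tag, packet)
+    #     if output_packet is not None:
+    #         ...  # consume the result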
+ + def track_invocation(self, *streams: dp.Stream, label: str | None = None) -> None: + if not self._skip_tracking and self._tracker_manager is not None: + self._tracker_manager.record_pod_invocation(self, streams, label=label) + + +def function_pod( + output_keys: str | Collection[str] | None = None, + function_name: str | None = None, + label: str | None = None, + **kwargs, +) -> Callable[..., "FunctionPod"]: + """ + Decorator that wraps a function in a FunctionPod instance. + + Args: + output_keys: Keys for the function output(s) + function_name: Name of the function pod; if None, defaults to the function name + **kwargs: Additional keyword arguments to pass to the FunctionPod constructor. Please refer to the FunctionPod documentation for details. + + Returns: + FunctionPod instance wrapping the decorated function + """ + + def decorator(func) -> FunctionPod: + if func.__name__ == "": + raise ValueError("Lambda functions cannot be used with function_pod") + + if not hasattr(func, "__module__") or func.__module__ is None: + raise ValueError( + f"Function {func.__name__} must be defined at module level" + ) + + # Store the original function in the module for pickling purposes + # and make sure to change the name of the function + module = sys.modules[func.__module__] + base_function_name = func.__name__ + new_function_name = f"_original_{func.__name__}" + setattr(module, new_function_name, func) + # rename the function to be consistent and make it pickleable + setattr(func, "__name__", new_function_name) + setattr(func, "__qualname__", new_function_name) + + # Create a simple typed function pod + pod = FunctionPod( + function=func, + output_keys=output_keys, + function_name=function_name or base_function_name, + label=label, + **kwargs, + ) + return pod + + return decorator + + +class FunctionPod(ActivatablePodBase): + def __init__( + self, + function: dp.PodFunction, + output_keys: str | Collection[str] | None = None, + function_name=None, + input_typespec: TypeSpec | None = None, + output_typespec: TypeSpec | Sequence[type] | None = None, + label: str | None = None, + function_info_extractor: hp.FunctionInfoExtractor | None = None, + **kwargs, + ) -> None: + self.function = function + + if output_keys is None: + output_keys = [] + if isinstance(output_keys, str): + output_keys = [output_keys] + self.output_keys = output_keys + if function_name is None: + if hasattr(self.function, "__name__"): + function_name = getattr(self.function, "__name__") + else: + raise ValueError( + "function_name must be provided if function has no __name__ attribute" + ) + self.function_name = function_name + super().__init__(label=label or self.function_name, **kwargs) + + # extract input and output types from the function signature + input_packet_types, output_packet_types = tsutils.extract_function_typespecs( + self.function, + self.output_keys, + input_typespec=input_typespec, + output_typespec=output_typespec, + ) + self._input_packet_schema = PythonSchema(input_packet_types) + self._output_packet_schema = PythonSchema(output_packet_types) + self._output_semantic_converter = SemanticConverter.from_semantic_schema( + self._output_packet_schema.to_semantic_schema( + semantic_type_registry=self.data_context.semantic_type_registry + ) + ) + + self._function_info_extractor = function_info_extractor + + @property + def kernel_id(self) -> tuple[str, ...]: + return (self.function_name,) + + def input_packet_types(self) -> PythonSchema: + """ + Return the input typespec for the function pod. 
+ This is used to validate the input streams. + """ + return self._input_packet_schema.copy() + + def output_packet_types(self) -> PythonSchema: + """ + Return the output typespec for the function pod. + This is used to validate the output streams. + """ + return self._output_packet_schema.copy() + + def __repr__(self) -> str: + return f"FunctionPod:{self.function_name}" + + def __str__(self) -> str: + include_module = self.function.__module__ != "__main__" + func_sig = get_function_signature( + self.function, + name_override=self.function_name, + include_module=include_module, + ) + return f"FunctionPod:{func_sig}" + + def call(self, tag: dp.Tag, packet: dp.Packet) -> tuple[dp.Tag, DictPacket | None]: + if not self.is_active(): + logger.info( + f"Pod is not active: skipping computation on input packet {packet}" + ) + return tag, None + output_values = [] + + # any kernel/pod invocation happening inside the function will NOT be tracked + with self._tracker_manager.no_tracking(): + values = self.function(**packet.as_dict(include_source=False)) + + if len(self.output_keys) == 0: + output_values = [] + elif len(self.output_keys) == 1: + output_values = [values] # type: ignore + elif isinstance(values, Iterable): + output_values = list(values) # type: ignore + elif len(self.output_keys) > 1: + raise ValueError( + "Values returned by function must be a pathlike or a sequence of pathlikes" + ) + + if len(output_values) != len(self.output_keys): + raise ValueError( + f"Number of output keys {len(self.output_keys)}:{self.output_keys} does not match number of values returned by function {len(output_values)}" + ) + + output_data = {k: v for k, v in zip(self.output_keys, output_values)} + source_info = {k: ":".join(self.kernel_id + (k,)) for k in output_data} + + output_packet = DictPacket( + {k: v for k, v in zip(self.output_keys, output_values)}, + source_info=source_info, + typespec=self.output_packet_types(), + semantic_converter=self._output_semantic_converter, + data_context=self._data_context, + ) + return tag, output_packet + + def kernel_identity_structure( + self, streams: Collection[dp.Stream] | None = None + ) -> Any: + # construct identity structure for the function + + # if function_info_extractor is available, use that but substitute the function_name + if self._function_info_extractor is not None: + function_info = self._function_info_extractor.extract_function_info( + self.function, + function_name=self.function_name, + input_typespec=self.input_packet_types(), + output_typespec=self.output_packet_types(), + ) + else: + # use basic information only + function_info = { + "name": self.function_name, + "input_packet_types": self.input_packet_types(), + "output_packet_types": self.output_packet_types(), + } + + id_struct = ( + self.__class__.__name__, + function_info, + ) + # if streams are provided, perform pre-processing step, validate, and add the + # resulting single stream to the identity structure + if streams is not None and len(streams) != 0: + id_struct += tuple(streams) + + return id_struct + + +class WrappedPod(ActivatablePodBase): + """ + A wrapper for an existing pod, allowing for additional functionality or modifications without changing the original pod. + This class is meant to serve as a base class for other pods that need to wrap existing pods. + Note that only the call logic is pass through to the wrapped pod, but the forward logic is not. 
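+    See CachedPod below for a concrete example that wraps a pod to add result caching.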
+ """ + + def __init__( + self, + pod: dp.Pod, + label: str | None = None, + data_context: str | DataContext | None = None, + **kwargs, + ) -> None: + if data_context is None: + data_context = pod.data_context_key + super().__init__( + label=label, + data_context=data_context, + **kwargs, + ) + self.pod = pod + + @property + def kernel_id(self) -> tuple[str, ...]: + """ + Return the pod ID, which is the function name of the wrapped pod. + This is used to identify the pod in the system. + """ + return self.pod.kernel_id + + def computed_label(self) -> str | None: + return self.pod.label + + def input_packet_types(self) -> TypeSpec: + """ + Return the input typespec for the stored pod. + This is used to validate the input streams. + """ + return self.pod.input_packet_types() + + def output_packet_types(self) -> TypeSpec: + """ + Return the output typespec for the stored pod. + This is used to validate the output streams. + """ + return self.pod.output_packet_types() + + def validate_inputs(self, *streams: dp.Stream) -> None: + self.pod.validate_inputs(*streams) + + def call(self, tag: dp.Tag, packet: dp.Packet) -> tuple[dp.Tag, dp.Packet | None]: + return self.pod.call(tag, packet) + + def kernel_identity_structure( + self, streams: Collection[dp.Stream] | None = None + ) -> Any: + return self.pod.identity_structure(streams) + + def __repr__(self) -> str: + return f"WrappedPod({self.pod!r})" + + def __str__(self) -> str: + return f"WrappedPod:{self.pod!s}" + + +class CachedPod(WrappedPod): + """ + A pod that caches the results of the wrapped pod. + This is useful for pods that are expensive to compute and can benefit from caching. + """ + + # name of the column in the tag store that contains the packet hash + PACKET_HASH_COLUMN = f"{constants.META_PREFIX}packet_hash" + DATA_RETRIEVED_FLAG = f"{constants.META_PREFIX}data_retrieved" + + def __init__( + self, + pod: dp.Pod, + result_store: ArrowDataStore, + record_path_prefix: tuple[str, ...] = (), + **kwargs, + ): + super().__init__(pod, **kwargs) + self.record_path_prefix = record_path_prefix + self.result_store = result_store + # unset data_context native to the object + + self.pod_hash = self.data_context.object_hasher.hash_to_hex( + self.pod, prefix_hasher_id=True + ) + + @property + def record_path(self) -> tuple[str, ...]: + """ + Return the path to the record in the result store. + This is used to store the results of the pod. + """ + return self.record_path_prefix + self.kernel_id + (self.pod_hash,) + + def call( + self, + tag: dp.Tag, + packet: dp.Packet, + skip_record_check: bool = False, + skip_recording: bool = False, + overwrite_existing: bool = False, + ) -> tuple[dp.Tag, dp.Packet | None]: + output_packet = None + if not skip_record_check: + output_packet = self.get_recorded_output_packet(packet) + if output_packet is None: + tag, output_packet = self.pod.call(tag, packet) + if output_packet is not None and not skip_recording: + self.record_packet( + packet, output_packet, overwrite_existing=overwrite_existing + ) + + return tag, output_packet + + def record_packet( + self, + input_packet: dp.Packet, + output_packet: dp.Packet, + overwrite_existing: bool = False, + ignore_duplicates: bool = False, + ) -> dp.Packet: + """ + Record the output packet against the input packet in the result store. 
+ """ + data_table = output_packet.as_table(include_context=True, include_source=True) + + result_flag = self.result_store.add_record( + self.record_path, + input_packet.content_hash(), + data_table, + overwrite_existing=overwrite_existing, + ignore_duplicates=ignore_duplicates, + ) + if result_flag is None: + # TODO: do more specific error handling + raise ValueError( + f"Failed to record packet {input_packet} in result store {self.result_store}" + ) + # TODO: make store return retrieved table + return output_packet + + def get_recorded_output_packet(self, input_packet: dp.Packet) -> dp.Packet | None: + """ + Retrieve the output packet from the result store based on the input packet. + If the output packet is not found, return None. + """ + result_table = self.result_store.get_record_by_id( + self.record_path, input_packet.content_hash() + ) + if result_table is None: + return None + + # note that data context will be loaded from the result store + return ArrowPacket( + result_table, + meta_info={self.DATA_RETRIEVED_FLAG: str(datetime.now(timezone.utc))}, + ) diff --git a/src/orcapod/data/streams.py b/src/orcapod/data/streams.py new file mode 100644 index 0000000..f0178d5 --- /dev/null +++ b/src/orcapod/data/streams.py @@ -0,0 +1,991 @@ +import logging +from pathlib import Path +import warnings +from abc import ABC, abstractmethod +from collections.abc import Collection, Iterator +from datetime import datetime, timezone +from itertools import repeat +from typing import TYPE_CHECKING, Any, Literal + +from orcapod.data.base import LabeledContentIdentifiableBase +from orcapod.data.context import DataContext +from orcapod.data.datagrams import ( + ArrowPacket, + ArrowTag, + DictTag, +) +from orcapod.data.system_constants import orcapod_constants as constants +from orcapod.protocols import data_protocols as dp +from orcapod.types import TypeSpec, schemas +from orcapod.types.semantic_converter import SemanticConverter +from orcapod.utils import arrow_utils +from orcapod.utils.lazy_module import LazyModule + +if TYPE_CHECKING: + import pyarrow as pa +else: + pa = LazyModule("pyarrow") + +# TODO: consider using this instead of making copy of dicts +# from types import MappingProxyType + +logger = logging.getLogger(__name__) + + +class StreamBase(ABC, LabeledContentIdentifiableBase): + """ + A stream is a collection of tagged-packets that are generated by an operation. + The stream is iterable and can be used to access the packets in the stream. + + A stream has property `invocation` that is an instance of Invocation that generated the stream. + This may be None if the stream is not generated by a kernel (i.e. directly instantiated by a user). + """ + + def __init__( + self, + source: dp.Kernel | None = None, + upstreams: tuple[dp.Stream, ...] = (), + data_context: str | DataContext | None = None, + **kwargs, + ) -> None: + super().__init__(**kwargs) + self._source = source + self._upstreams = upstreams + self._last_modified: datetime | None = None + self._set_modified_time() + # note that this is not necessary for Stream protocol, but is provided + # for convenience to resolve semantic types and other context-specific information + self._data_context = DataContext.resolve_data_context(data_context) + + @property + def data_context(self) -> DataContext: + """ + Returns the data context for the stream. + This is used to resolve semantic types and other context-specific information. 
+ """ + return self._data_context + + @property + def source(self) -> dp.Kernel | None: + """ + The source of the stream, which is the kernel that generated the stream. + This is typically used to track the origin of the stream in the computational graph. + """ + return self._source + + @property + def upstreams(self) -> tuple[dp.Stream, ...]: + """ + The upstream streams that are used to generate this stream. + This is typically used to track the origin of the stream in the computational graph. + """ + return self._upstreams + + def computed_label(self) -> str | None: + if self.source is not None: + # use the invocation operation label + return self.source.label + return None + + @abstractmethod + def keys(self) -> tuple[tuple[str, ...], tuple[str, ...]]: ... + + @abstractmethod + def types(self) -> tuple[TypeSpec, TypeSpec]: ... + + @property + def last_modified(self) -> datetime | None: + """ + Returns when the stream's content was last modified. + This is used to track the time when the stream was last accessed. + Returns None if the stream has not been accessed yet. + """ + return self._last_modified + + @property + def is_current(self) -> bool: + """ + Returns whether the stream is current. + A stream is current if the content is up-to-date with respect to its source. + This can be used to determine if a stream with non-None last_modified is up-to-date. + Note that for asynchronous streams, this status is not applicable and always returns False. + """ + if self.last_modified is None: + # If there is no last_modified timestamp, we cannot determine if the stream is current + return False + + for upstream in self.upstreams: + if ( + not upstream.is_current + or upstream.last_modified is None + or upstream.last_modified > self.last_modified + ): + return False + return True + + def _set_modified_time( + self, timestamp: datetime | None = None, invalidate: bool = False + ) -> None: + if invalidate: + self._last_modified = None + return + + if timestamp is not None: + self._last_modified = timestamp + else: + self._last_modified = datetime.now(timezone.utc) + + def __iter__( + self, + ) -> Iterator[tuple[dp.Tag, dp.Packet]]: + return self.iter_packets() + + @abstractmethod + def iter_packets( + self, + ) -> Iterator[tuple[dp.Tag, dp.Packet]]: ... + + @abstractmethod + def as_table( + self, + include_data_context: bool = False, + include_source: bool = False, + include_content_hash: bool | str = False, + ) -> pa.Table: ... + + def flow(self) -> Collection[tuple[dp.Tag, dp.Packet]]: + """ + Flow everything through the stream, returning the entire collection of + (Tag, Packet) as a collection. This will tigger any upstream computation of the stream. + """ + return [e for e in self] + + # --------------------- Recursive methods --------------------------- + # These methods form a step in the multi-class recursive invocation that follows the pattern of + # Stream -> Invocation -> Kernel -> Stream ... -> Invocation -> Kernel + # Most of the method logic would be found in Kernel's implementation of the method with + # Stream and Invocation simply serving as recursive steps + + def identity_structure(self) -> Any: + """ + Identity structure of a stream is deferred to the identity structure + of the associated invocation, if present. + A bare stream without invocation has no well-defined identity structure. 
+ Specialized stream subclasses should override this method to provide more meaningful identity structure + """ + if self.source is not None: + # if the stream is generated by an operation, use the identity structure from the invocation + return self.source.identity_structure(self.upstreams) + return super().identity_structure() + + +class ImmutableTableStream(StreamBase): + """ + An immutable stream based on a PyArrow Table. + This stream is designed to be used with data that is already in a tabular format, + such as data loaded from a file or database. The columns to be treated as tags are + specified at initialization, and the rest of the columns are treated as packets. + The stream is immutable, meaning that once it is created, it cannot be modified. + This is useful for ensuring that the data in the stream remains consistent and unchanging. + + The types of the tag and packet columns are inferred from the PyArrow Table schema. + """ + + def __init__( + self, + table: pa.Table, + tag_columns: Collection[str] = (), + source_info: dict[str, str | None] | None = None, + source: dp.Kernel | None = None, + upstreams: tuple[dp.Stream, ...] = (), + **kwargs, + ) -> None: + super().__init__(source=source, upstreams=upstreams, **kwargs) + + table, data_context_table = arrow_utils.split_by_column_groups( + table, [constants.CONTEXT_KEY] + ) + if data_context_table is None: + data_context_table = pa.table( + {constants.CONTEXT_KEY: pa.nulls(len(table), pa.large_string())} + ) + + prefix_info = {constants.SOURCE_PREFIX: source_info} + + # determine tag columns first and then exclude any source info + self._tag_columns = tuple(c for c in tag_columns if c in table.column_names) + table, prefix_tables = arrow_utils.prepare_prefixed_columns( + table, prefix_info, exclude_columns=self._tag_columns + ) + # now table should only contain tag columns and packet columns + self._packet_columns = tuple( + c for c in table.column_names if c not in tag_columns + ) + self._table = table + self._source_info_table = prefix_tables[constants.SOURCE_PREFIX] + self._data_context_table = data_context_table + + if len(self._packet_columns) == 0: + raise ValueError( + "No packet columns found in the table. At least one packet column is required." + ) + + tag_schema = pa.schema( + f for f in self._table.schema if f.name in self._tag_columns + ) + packet_schema = pa.schema( + f for f in self._table.schema if f.name in self._packet_columns + ) + + self._tag_schema = tag_schema + self._packet_schema = packet_schema + self._tag_converter = SemanticConverter.from_semantic_schema( + schemas.SemanticSchema.from_arrow_schema( + tag_schema, self._data_context.semantic_type_registry + ) + ) + self._packet_converter = SemanticConverter.from_semantic_schema( + schemas.SemanticSchema.from_arrow_schema( + packet_schema, self._data_context.semantic_type_registry + ) + ) + + self._cached_elements: list[tuple[dp.Tag, ArrowPacket]] | None = None + self._set_modified_time() # set modified time to now + + def keys(self) -> tuple[tuple[str, ...], tuple[str, ...]]: + """ + Returns the keys of the tag and packet columns in the stream. + This is useful for accessing the columns in the stream. + """ + return self._tag_columns, self._packet_columns + + def types(self) -> tuple[schemas.PythonSchema, schemas.PythonSchema]: + """ + Returns the types of the tag and packet columns in the stream. + This is useful for accessing the types of the columns in the stream. 
+ """ + # TODO: consider using MappingProxyType to avoid copying the dicts + return ( + schemas.PythonSchema.from_arrow_schema( + self._tag_schema, converters=self._tag_converter.as_dict() + ), + schemas.PythonSchema.from_arrow_schema( + self._packet_schema, converters=self._packet_converter.as_dict() + ), + ) + + def as_table( + self, + include_data_context: bool = False, + include_source: bool = False, + include_content_hash: bool | str = False, + ) -> pa.Table: + """ + Returns the underlying table representation of the stream. + This is useful for converting the stream to a table format. + """ + output_table = self._table + if include_content_hash: + hash_column_name = ( + "_content_hash" + if include_content_hash is True + else include_content_hash + ) + content_hashes = [ + packet.content_hash() for _, packet in self.iter_packets() + ] + output_table = output_table.append_column( + hash_column_name, pa.array(content_hashes, type=pa.large_string()) + ) + table_stack = (output_table,) + if include_data_context: + table_stack += (self._data_context_table,) + if include_source: + table_stack += (self._source_info_table,) + return arrow_utils.hstack_tables(*table_stack) + + def clear_cache(self) -> None: + """ + Resets the cached elements of the stream. + This is useful for re-iterating over the stream. + """ + self._cached_elements = None + + def iter_packets(self) -> Iterator[tuple[dp.Tag, ArrowPacket]]: + """ + Iterates over the packets in the stream. + Each packet is represented as a tuple of (Tag, Packet). + """ + # TODO: make it work with table batch stream + if self._cached_elements is None: + self._cached_elements = [] + tag_present = len(self._tag_columns) > 0 + if tag_present: + tags = self._table.select(self._tag_columns) + tag_batches = tags.to_batches() + else: + tag_batches = repeat(DictTag({})) + + # TODO: come back and clean up this logic + + packets = self._table.select(self._packet_columns) + for tag_batch, packet_batch in zip(tag_batches, packets.to_batches()): + for i in range(len(packet_batch)): + if tag_present: + tag = ArrowTag( + tag_batch.slice(i, 1), # type: ignore + semantic_converter=self._tag_converter, + data_context=self._data_context, + ) + + else: + tag = tag_batch + self._cached_elements.append( + ( + tag, + ArrowPacket( + packet_batch.slice(i, 1), + semantic_converter=self._packet_converter, + data_context=self._data_context, + ), + ) + ) + yield from self._cached_elements + + def __repr__(self) -> str: + return ( + f"{self.__class__.__name__}(table={self._table.column_names}, " + f"tag_columns={self._tag_columns})" + ) + + +class KernelStream(StreamBase): + """ + Recomputable stream that wraps a stream produced by a kernel to provide + an abstraction over the stream, taking the stream's source and upstreams as the basis of + recomputing the stream. + + This stream is used to represent the output of a kernel invocation. + """ + + def __init__( + self, + output_stream: dp.Stream | None = None, + source: dp.Kernel | None = None, + upstreams: tuple[ + dp.Stream, ... + ] = (), # if provided, this will override the upstreams of the output_stream + **kwargs, + ) -> None: + if (output_stream is None or output_stream.source is None) and source is None: + raise ValueError( + "Either output_stream must have a kernel assigned to it or source must be provided in order to be recomputable." 
+ ) + if source is None: + if output_stream is None or output_stream.source is None: + raise ValueError( + "Either output_stream must have a kernel assigned to it or source must be provided in order to be recomputable." + ) + source = output_stream.source + upstreams = upstreams or output_stream.upstreams + + super().__init__(source=source, upstreams=upstreams, **kwargs) + self._cached_stream = output_stream + + def clear_cache(self) -> None: + """ + Clears the cached stream. + This is useful for re-processing the stream with the same kernel. + """ + self._cached_stream = None + self._set_modified_time(invalidate=True) + + def keys(self) -> tuple[tuple[str, ...], tuple[str, ...]]: + """ + Returns the keys of the tag and packet columns in the stream. + This is useful for accessing the columns in the stream. + """ + self.refresh() + assert self._cached_stream is not None, ( + "_cached_stream should not be None here." + ) + return self._cached_stream.keys() + + def types(self) -> tuple[TypeSpec, TypeSpec]: + """ + Returns the types of the tag and packet columns in the stream. + This is useful for accessing the types of the columns in the stream. + """ + self.refresh() + assert self._cached_stream is not None, ( + "_cached_stream should not be None here." + ) + return self._cached_stream.types() + + @property + def is_current(self) -> bool: + if self._cached_stream is None or not super().is_current: + status = self.refresh() + if not status: # if it failed to update for whatever reason + return False + return True + + def refresh(self, force: bool = False) -> bool: + updated = False + if force or (self._cached_stream is not None and not super().is_current): + self.clear_cache() + + if self._cached_stream is None: + assert self.source is not None, ( + "Stream source must be set to recompute the stream." + ) + self._cached_stream = self.source.forward(*self.upstreams) + self._set_modified_time() + updated = True + + if self._cached_stream is None: + # TODO: use beter error type + raise ValueError( + "Stream could not be updated. Ensure that the source is valid and upstreams are correct." + ) + + return updated + + def invalidate(self) -> None: + """ + Invalidate the stream, marking it as needing recomputation. + This will clear the cached stream and set the last modified time to None. + """ + self.clear_cache() + self._set_modified_time(invalidate=True) + + @property + def last_modified(self) -> datetime | None: + if self._cached_stream is None: + return None + return self._cached_stream.last_modified + + def as_table( + self, + include_data_context: bool = False, + include_source: bool = False, + include_content_hash: bool | str = False, + ) -> pa.Table: + self.refresh() + assert self._cached_stream is not None, ( + "Stream has not been updated or is empty." + ) + return self._cached_stream.as_table( + include_data_context=include_data_context, + include_source=include_source, + include_content_hash=include_content_hash, + ) + + def iter_packets(self) -> Iterator[tuple[dp.Tag, dp.Packet]]: + self.refresh() + assert self._cached_stream is not None, ( + "Stream has not been updated or is empty." + ) + return self._cached_stream.iter_packets() + + def __repr__(self) -> str: + return f"{self.__class__.__name__}(kernel={self.source}, upstreams={self.upstreams})" + + +class LazyPodResultStream(StreamBase): + """ + A fixed stream that lazily processes packets from a prepared input stream. + This is what Pod.process() returns - it's static/fixed but efficient. 
+    """
+
+    def __init__(self, pod: dp.Pod, prepared_stream: dp.Stream, **kwargs):
+        super().__init__(source=pod, upstreams=(prepared_stream,), **kwargs)
+        self.pod = pod
+        self.prepared_stream = prepared_stream
+        self._set_modified_time()  # set modified time to when we obtain the iterator
+        # capture the immutable iterator from the prepared stream
+        self._prepared_stream_iterator = prepared_stream.iter_packets()
+
+        # Packet-level caching (mirrors PodStream)
+        self._cached_output_packets: dict[int, tuple[dp.Tag, dp.Packet | None]] = {}
+        # Table-level caches used by as_table()
+        self._cached_output_table: pa.Table | None = None
+        self._cached_content_hash_column: pa.Array | None = None
+
+    def iter_packets(self) -> Iterator[tuple[dp.Tag, dp.Packet]]:
+        if self._prepared_stream_iterator is not None:
+            for i, (tag, packet) in enumerate(self._prepared_stream_iterator):
+                if i in self._cached_output_packets:
+                    # Use cached result
+                    tag, packet = self._cached_output_packets[i]
+                    if packet is not None:
+                        yield tag, packet
+                else:
+                    # Process packet
+                    processed = self.pod.call(tag, packet)
+                    if processed is not None:
+                        # Update shared cache for future iterators (optimization)
+                        self._cached_output_packets[i] = processed
+                        tag, packet = processed
+                        if packet is not None:
+                            yield tag, packet
+
+            # Mark completion by releasing the iterator
+            self._prepared_stream_iterator = None
+        else:
+            # Yield from snapshot of complete cache
+            for i in range(len(self._cached_output_packets)):
+                tag, packet = self._cached_output_packets[i]
+                if packet is not None:
+                    yield tag, packet
+
+    def keys(self) -> tuple[tuple[str, ...], tuple[str, ...]]:
+        """
+        Returns the keys of the tag and packet columns in the stream.
+        This is useful for accessing the columns in the stream.
+        """
+
+        tag_keys, _ = self.prepared_stream.keys()
+        packet_keys = tuple(self.pod.output_packet_types().keys())
+        return tag_keys, packet_keys
+
+    def types(self) -> tuple[TypeSpec, TypeSpec]:
+        tag_typespec, _ = self.prepared_stream.types()
+        # TODO: check if copying can be avoided
+        packet_typespec = dict(self.pod.output_packet_types())
+        return tag_typespec, packet_typespec
+
+    def as_table(
+        self,
+        include_data_context: bool = False,
+        include_source: bool = False,
+        include_content_hash: bool | str = False,
+    ) -> pa.Table:
+        if self._cached_output_table is None:
+            all_tags = []
+            all_packets = []
+            tag_schema, packet_schema = None, None
+            for tag, packet in self.iter_packets():
+                if tag_schema is None:
+                    tag_schema = tag.arrow_schema()
+                if packet_schema is None:
+                    packet_schema = packet.arrow_schema(
+                        include_context=True,
+                        include_source=True,
+                    )
+                all_tags.append(tag.as_dict())
+                # FIXME: in-a-pinch conversion from Path to str;
+                # replace with an appropriate semantic converter-based approach!
+                dict_packet = packet.as_dict(include_context=True, include_source=True)
+                for k, v in dict_packet.items():
+                    if isinstance(v, Path):
+                        dict_packet[k] = str(v)
+                all_packets.append(dict_packet)
+
+            # FIXME: this skips the semantic version conversion and thus is not
+            # fully correct!
+            all_tags: pa.Table = pa.Table.from_pylist(all_tags, schema=tag_schema)
+            all_packets: pa.Table = pa.Table.from_pylist(
+                all_packets, schema=packet_schema
+            )
+
+            self._cached_output_table = arrow_utils.hstack_tables(all_tags, all_packets)
+        assert self._cached_output_table is not None, (
+            "_cached_output_table should not be None here."
+ ) + + drop_columns = [] + if not include_source: + drop_columns.extend(f"{constants.SOURCE_PREFIX}{c}" for c in self.keys()[1]) + if not include_data_context: + drop_columns.append(constants.CONTEXT_KEY) + + output_table = self._cached_output_table.drop(drop_columns) + + # lazily prepare content hash column if requested + if include_content_hash: + if self._cached_content_hash_column is None: + content_hashes = [] + for tag, packet in self.iter_packets(): + content_hashes.append(packet.content_hash()) + self._cached_content_hash_column = pa.array( + content_hashes, type=pa.large_string() + ) + assert self._cached_content_hash_column is not None, ( + "_cached_content_hash_column should not be None here." + ) + hash_column_name = ( + "_content_hash" + if include_content_hash is True + else include_content_hash + ) + output_table = output_table.append_column( + hash_column_name, self._cached_content_hash_column + ) + return output_table + + +class PodStream(StreamBase): + def __init__( + self, + pod: dp.Pod, + input_streams: tuple[dp.Stream, ...], + error_handling: Literal["raise", "ignore", "warn"] = "raise", + **kwargs, + ) -> None: + super().__init__(upstreams=input_streams, **kwargs) + self.pod = pod + self.input_streams = input_streams + self.error_handling = error_handling + self._source = pod + + # Cache for processed packets + # This is a dictionary mapping the index of the packet in the input stream to a tuple of (Tag, Packet) + # This allows us to efficiently access the processed packets without re-processing them + self._cached_forward_stream: dp.Stream | None = None + self._cached_output_packets: dict[int, tuple[dp.Tag, dp.Packet]] = {} + self._computation_complete: bool = False + self._cached_output_table: pa.Table | None = None + self._cached_content_hash_column: pa.Array | None = None + + @property + def source(self) -> dp.Pod | None: + """ + The source of the stream, which is the pod that generated the stream. + This is typically used to track the origin of the stream in the computational graph. + """ + return self._source + + def forward_stream(self) -> dp.Stream: + if self._cached_forward_stream is None: + self._cached_forward_stream = self.pod.forward(*self.input_streams) + return self._cached_forward_stream + + @property + def is_current(self) -> bool: + return self.forward_stream().is_current + + def keys(self) -> tuple[tuple[str, ...], tuple[str, ...]]: + """ + Returns the keys of the tag and packet columns in the stream. + This is useful for accessing the columns in the stream. + """ + + tag_keys, _ = self.forward_stream().keys() + packet_keys = tuple(self.pod.output_packet_types().keys()) + return tag_keys, packet_keys + + def types(self) -> tuple[TypeSpec, TypeSpec]: + tag_typespec, _ = self.forward_stream().types() + # TODO: check if copying can be avoided + packet_typespec = dict(self.pod.output_packet_types()) + return tag_typespec, packet_typespec + + def clear_cache(self) -> None: + """ + Clears the cached results of the processed stream. + This is useful for re-processing the stream with the same processor. + """ + self._cached_forward_stream = None + self._cached_output_packets = {} + self._computation_complete = False + self._cached_output_table = None + self._cached_content_hash_column = None + + def refresh(self, force: bool = False) -> bool: + if not self.is_current or force: + self.invalidate() + return True + return False + + def invalidate(self) -> None: + """ + Invalidate the stream, marking it as needing recomputation. 
+ This will clear the cached stream and set the last modified time to None. + """ + self.clear_cache() + self._set_modified_time(invalidate=True) + + def as_table( + self, + include_data_context: bool = False, + include_source: bool = False, + include_content_hash: bool | str = False, + ) -> pa.Table: + # TODO: note that this is likely NOT multi-thread safe + self.refresh() + if self._cached_output_table is None: + all_tags = [] + all_packets = [] + tag_schema, packet_schema = None, None + for tag, packet in self.iter_packets(): + if tag_schema is None: + tag_schema = tag.arrow_schema() + if packet_schema is None: + packet_schema = packet.arrow_schema( + include_context=True, + include_source=True, + ) + all_tags.append(tag.as_dict()) + all_packets.append( + packet.as_dict(include_context=True, include_source=True) + ) + + all_tags: pa.Table = pa.Table.from_pylist(all_tags, schema=tag_schema) + all_packets: pa.Table = pa.Table.from_pylist( + all_packets, schema=packet_schema + ) + + self._cached_output_table = arrow_utils.hstack_tables(all_tags, all_packets) + assert self._cached_output_table is not None, ( + "_cached_output_table should not be None here." + ) + + drop_columns = [] + if not include_source: + drop_columns.extend(f"{constants.SOURCE_PREFIX}{c}" for c in self.keys()[1]) + if not include_data_context: + drop_columns.append(constants.CONTEXT_KEY) + + output_table = self._cached_output_table.drop(drop_columns) + + # lazily prepare content hash column if requested + if include_content_hash: + if self._cached_content_hash_column is None: + content_hashes = [] + for tag, packet in self.iter_packets(): + content_hashes.append(packet.content_hash()) + self._cached_content_hash_column = pa.array( + content_hashes, type=pa.large_string() + ) + assert self._cached_content_hash_column is not None, ( + "_cached_content_hash_column should not be None here." + ) + hash_column_name = ( + "_content_hash" + if include_content_hash is True + else include_content_hash + ) + output_table = output_table.append_column( + hash_column_name, self._cached_content_hash_column + ) + return output_table + + def iter_packets(self) -> Iterator[tuple[dp.Tag, dp.Packet]]: + self.refresh() + if not self._computation_complete or self._cached_output_packets is None: + for i, (tag, packet) in enumerate(self.forward_stream().iter_packets()): + if i not in self._cached_output_packets: + try: + processed_tag, processed_packet = self.pod.call(tag, packet) + except Exception as e: + logger.error(f"Error processing packet {packet}: {e}") + if self.error_handling == "raise": + raise e + elif self.error_handling == "warn": + warnings.warn(f"Error processing packet {packet}: {e}") + continue + elif self.error_handling == "ignore": + continue + else: + raise ValueError( + f"Unknown error handling mode: {self.error_handling} encountered while handling error:" + ) from e + if processed_packet is None: + # call returning None means the packet should be skipped + logger.debug( + f"Packet {packet} with tag {tag} was processed but returned None, skipping." 
+                        )
+                        continue
+                    self._cached_output_packets[i] = (processed_tag, processed_packet)
+                    yield processed_tag, processed_packet
+            self._computation_complete = True
+            self._set_modified_time()
+
+        else:
+            for i in range(len(self._cached_output_packets)):
+                yield self._cached_output_packets[i]
+
+
+class WrappedStream(StreamBase):
+    def __init__(
+        self,
+        stream: dp.Stream,
+        source: dp.Kernel,
+        input_streams: tuple[dp.Stream, ...],
+        label: str | None = None,
+        **kwargs,
+    ) -> None:
+        super().__init__(source=source, upstreams=input_streams, label=label, **kwargs)
+        self._stream = stream
+
+    def keys(self) -> tuple[tuple[str, ...], tuple[str, ...]]:
+        """
+        Returns the keys of the tag and packet columns in the stream.
+        This is useful for accessing the columns in the stream.
+        """
+        return self._stream.keys()
+
+    def types(self) -> tuple[TypeSpec, TypeSpec]:
+        """
+        Returns the types of the tag and packet columns in the stream.
+        This is useful for accessing the types of the columns in the stream.
+        """
+        return self._stream.types()
+
+    def as_table(
+        self,
+        include_data_context: bool = False,
+        include_source: bool = False,
+        include_content_hash: bool | str = False,
+    ) -> pa.Table:
+        """
+        Returns the underlying table representation of the stream.
+        This is useful for converting the stream to a table format.
+        """
+        return self._stream.as_table(
+            include_data_context=include_data_context,
+            include_source=include_source,
+            include_content_hash=include_content_hash,
+        )
+
+    def iter_packets(self) -> Iterator[tuple[dp.Tag, dp.Packet]]:
+        """
+        Iterates over the packets in the stream.
+        Each packet is represented as a tuple of (Tag, Packet).
+        """
+        return self._stream.iter_packets()
+
+    def identity_structure(self) -> Any:
+        return self._stream.identity_structure()
+
+
+class InvokedPodStream(StreamBase):
+    """
+    Recomputable stream that wraps the stream produced by a pod to provide
+    an abstraction over the stream, taking the stream's source and upstreams as the basis of
+    recomputing the stream.
+
+    This stream is used to represent the output of a pod invocation.
+    """
+
+    def __init__(
+        self,
+        pod_stream: PodStream | None = None,
+        source: dp.Pod | None = None,
+        upstreams: tuple[
+            dp.Stream, ...
+        ] = (),  # if provided, this will override the upstreams of the pod_stream
+        **kwargs,
+    ) -> None:
+        if (pod_stream is None or pod_stream.source is None) and source is None:
+            raise ValueError(
+                "Either pod_stream must have a pod assigned to it or source must be provided in order to be recomputable."
+            )
+        if source is None:
+            if pod_stream is None or pod_stream.source is None:
+                raise ValueError(
+                    "Either pod_stream must have a pod assigned to it or source must be provided in order to be recomputable."
+                )
+            source = pod_stream.source
+            upstreams = upstreams or pod_stream.upstreams
+
+        super().__init__(source=source, upstreams=upstreams, **kwargs)
+        self._cached_stream = pod_stream
+
+    def clear_cache(self) -> None:
+        """
+        Clears the cached stream.
+        This is useful for re-processing the stream with the same pod.
+        """
+        self._cached_stream = None
+        self._set_modified_time(invalidate=True)
+
+    def keys(self) -> tuple[tuple[str, ...], tuple[str, ...]]:
+        """
+        Returns the keys of the tag and packet columns in the stream.
+        This is useful for accessing the columns in the stream.
+        """
+        self.refresh()
+        assert self._cached_stream is not None, (
+            "_cached_stream should not be None here."
+ ) + return self._cached_stream.keys() + + def types(self) -> tuple[TypeSpec, TypeSpec]: + """ + Returns the types of the tag and packet columns in the stream. + This is useful for accessing the types of the columns in the stream. + """ + self.refresh() + assert self._cached_stream is not None, ( + "_cached_stream should not be None here." + ) + return self._cached_stream.types() + + @property + def is_current(self) -> bool: + if self._cached_stream is None or not super().is_current: + status = self.refresh() + if not status: # if it failed to update for whatever reason + return False + return True + + def refresh(self, force: bool = False) -> bool: + updated = False + if force or (self._cached_stream is not None and not super().is_current): + self.clear_cache() + + if self._cached_stream is None: + assert self.source is not None, ( + "Stream source must be set to recompute the stream." + ) + self._cached_stream = self.source.forward(*self.upstreams) + self._set_modified_time() + updated = True + + if self._cached_stream is None: + # TODO: use beter error type + raise ValueError( + "Stream could not be updated. Ensure that the source is valid and upstreams are correct." + ) + + return updated + + def invalidate(self) -> None: + """ + Invalidate the stream, marking it as needing recomputation. + This will clear the cached stream and set the last modified time to None. + """ + self.clear_cache() + self._set_modified_time(invalidate=True) + + @property + def last_modified(self) -> datetime | None: + if self._cached_stream is None: + return None + return self._cached_stream.last_modified + + def as_table( + self, + include_data_context: bool = False, + include_source: bool = False, + include_content_hash: bool | str = False, + ) -> pa.Table: + self.refresh() + assert self._cached_stream is not None, ( + "Stream has not been updated or is empty." + ) + return self._cached_stream.as_table( + include_data_context=include_data_context, + include_source=include_source, + include_content_hash=include_content_hash, + ) + + def iter_packets(self) -> Iterator[tuple[dp.Tag, dp.Packet]]: + self.refresh() + assert self._cached_stream is not None, ( + "Stream has not been updated or is empty." 
+ ) + yield from self._cached_stream.iter_packets() + + def __repr__(self) -> str: + return f"{self.__class__.__name__}(kernel={self.source}, upstreams={self.upstreams})" diff --git a/src/orcapod/data/system_constants.py b/src/orcapod/data/system_constants.py new file mode 100644 index 0000000..de1bebc --- /dev/null +++ b/src/orcapod/data/system_constants.py @@ -0,0 +1,25 @@ +# Constants used for source info keys +SYSTEM_COLUMN_PREFIX = "__" +SOURCE_INFO_PREFIX = "_source_" + +DATA_CONTEXT_KEY = "_context_key" + + +class SystemConstant: + def __init__(self, global_prefix: str = ""): + self._global_prefix = global_prefix + + @property + def META_PREFIX(self) -> str: + return f"{self._global_prefix}{SYSTEM_COLUMN_PREFIX}" + + @property + def SOURCE_PREFIX(self) -> str: + return f"{self._global_prefix}{SOURCE_INFO_PREFIX}" + + @property + def CONTEXT_KEY(self) -> str: + return f"{self._global_prefix}{DATA_CONTEXT_KEY}" + + +orcapod_constants = SystemConstant() diff --git a/src/orcapod/data/trackers.py b/src/orcapod/data/trackers.py new file mode 100644 index 0000000..799334e --- /dev/null +++ b/src/orcapod/data/trackers.py @@ -0,0 +1,316 @@ +from orcapod.data.base import LabeledContentIdentifiableBase +from orcapod.protocols import data_protocols as dp, hashing_protocols as hp +from orcapod.data.context import DataContext +from orcapod.hashing.defaults import get_default_object_hasher +from collections import defaultdict +from collections.abc import Generator, Collection +from abc import ABC, abstractmethod +from typing import Any, TYPE_CHECKING +from contextlib import contextmanager + +if TYPE_CHECKING: + import networkx as nx + + +class BasicTrackerManager: + def __init__(self) -> None: + self._active_trackers: list[dp.Tracker] = [] + self._active = True + + def set_active(self, active: bool = True) -> None: + """ + Set the active state of the tracker manager. + This is used to enable or disable the tracker manager. + """ + self._active = active + + def register_tracker(self, tracker: dp.Tracker) -> None: + """ + Register a new tracker in the system. + This is used to add a new tracker to the list of active trackers. + """ + if tracker not in self._active_trackers: + self._active_trackers.append(tracker) + + def deregister_tracker(self, tracker: dp.Tracker) -> None: + """ + Remove a tracker from the system. + This is used to deactivate a tracker and remove it from the list of active trackers. + """ + if tracker in self._active_trackers: + self._active_trackers.remove(tracker) + + def get_active_trackers(self) -> list[dp.Tracker]: + """ + Get the list of active trackers. + This is used to retrieve the currently active trackers in the system. + """ + if not self._active: + return [] + # Filter out inactive trackers + # This is to ensure that we only return trackers that are currently active + return [t for t in self._active_trackers if t.is_active()] + + def record_kernel_invocation( + self, + kernel: dp.Kernel, + upstreams: tuple[dp.Stream, ...], + label: str | None = None, + ) -> None: + """ + Record the output stream of a kernel invocation in the tracker. + This is used to track the computational graph and the invocations of kernels. + """ + for tracker in self.get_active_trackers(): + tracker.record_kernel_invocation(kernel, upstreams, label=label) + + def record_pod_invocation( + self, pod: dp.Pod, upstreams: tuple[dp.Stream, ...], label: str | None = None + ) -> None: + """ + Record the output stream of a pod invocation in the tracker. 
+ This is used to track the computational graph and the invocations of pods. + """ + for tracker in self.get_active_trackers(): + tracker.record_pod_invocation(pod, upstreams, label=label) + + @contextmanager + def no_tracking(self) -> Generator[None, Any, None]: + original_state = self._active + self.set_active(False) + try: + yield + finally: + self.set_active(original_state) + + +class AutoRegisteringContextBasedTracker(ABC): + def __init__(self, tracker_manager: dp.TrackerManager | None = None) -> None: + self._tracker_manager = tracker_manager or DEFAULT_TRACKER_MANAGER + self._active = False + + def set_active(self, active: bool = True) -> None: + if active: + self._tracker_manager.register_tracker(self) + else: + self._tracker_manager.deregister_tracker(self) + self._active = active + + def is_active(self) -> bool: + return self._active + + @abstractmethod + def record_kernel_invocation( + self, + kernel: dp.Kernel, + upstreams: tuple[dp.Stream, ...], + label: str | None = None, + ) -> None: ... + + @abstractmethod + def record_pod_invocation( + self, pod: dp.Pod, upstreams: tuple[dp.Stream, ...], label: str | None = None + ) -> None: ... + + def __enter__(self): + self.set_active(True) + return self + + def __exit__(self, exc_type, exc_val, ext_tb): + self.set_active(False) + + +class StubKernel: + def __init__(self, stream: dp.Stream, label: str | None = None) -> None: + """ + A placeholder kernel that does nothing. + This is used to represent a kernel that has no computation. + """ + self.label = label or stream.label + self.stream = stream + + def forward(self, *args: Any, **kwargs: Any) -> dp.Stream: + """ + Forward the stream through the stub kernel. + This is a no-op and simply returns the stream. + """ + return self.stream + + def __call__(self, *args: Any, **kwargs: Any) -> dp.Stream: + return self.forward(*args, **kwargs) + + def identity_structure(self, streams: Collection[dp.Stream] | None = None) -> Any: + # FIXME: using label as a stop-gap for identity structure + return self.label + + def __hash__(self) -> int: + # TODO: resolve the logic around identity structure on a stream / stub kernel + """ + Hash the StubKernel based on its label and stream. + This is used to uniquely identify the StubKernel in the tracker. + """ + identity_structure = self.identity_structure() + if identity_structure is None: + return hash(self.stream) + return identity_structure + + +class Invocation(LabeledContentIdentifiableBase): + def __init__( + self, + kernel: dp.Kernel, + upstreams: tuple[dp.Stream, ...] = (), + label: str | None = None, + ) -> None: + """ + Represents an invocation of a kernel with its upstream streams. + This is used to track the computational graph and the invocations of kernels. + """ + super().__init__(label=label) + self.kernel = kernel + self.upstreams = upstreams + + def parents(self) -> tuple["Invocation", ...]: + parent_invoctions = [] + for stream in self.upstreams: + if stream.source is not None: + parent_invoctions.append(Invocation(stream.source, stream.upstreams)) + else: + source = StubKernel(stream) + parent_invoctions.append(Invocation(source)) + + return tuple(parent_invoctions) + + def computed_label(self) -> str | None: + """ + Compute a label for this invocation based on its kernel and upstreams. + If label is not explicitly set for this invocation and computed_label returns a valid value, + it will be used as label of this invocation. 
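+
+        For example, if the underlying kernel's label is "join" and no explicit
+        label was passed to this invocation, the invocation is labeled "join".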
+ """ + return self.kernel.label + + def identity_structure(self) -> Any: + """ + Return a structure that represents the identity of this invocation. + This is used to uniquely identify the invocation in the tracker. + """ + return self.kernel.identity_structure(self.upstreams) + + def __repr__(self) -> str: + return f"Invocation(kernel={self.kernel}, upstreams={self.upstreams}, label={self.label})" + + +class GraphTracker(AutoRegisteringContextBasedTracker): + """ + A tracker that records the invocations of operations and generates a graph + of the invocations and their dependencies. + """ + + # Thread-local storage to track active trackers + + def __init__( + self, + tracker_manager: dp.TrackerManager | None = None, + data_context: str | DataContext | None = None, + ) -> None: + super().__init__(tracker_manager=tracker_manager) + self._data_context = DataContext.resolve_data_context(data_context) + + # Dictionary to map kernels to the streams they have invoked + # This is used to track the computational graph and the invocations of kernels + self.kernel_invocations: set[Invocation] = set() + self.invocation_to_pod_lut: dict[Invocation, dp.Pod] = {} + self.id_to_invocation_lut: dict[str, Invocation] = {} + self.id_to_label_lut: dict[str, list[str]] = defaultdict(list) + self.id_to_pod_lut: dict[str, dp.Pod] = {} + + def _record_kernel_and_get_invocation( + self, + kernel: dp.Kernel, + upstreams: tuple[dp.Stream, ...], + label: str | None = None, + ) -> Invocation: + invocation = Invocation(kernel, upstreams, label=label) + self.kernel_invocations.add(invocation) + return invocation + + def record_kernel_invocation( + self, + kernel: dp.Kernel, + upstreams: tuple[dp.Stream, ...], + label: str | None = None, + ) -> None: + """ + Record the output stream of a kernel invocation in the tracker. + This is used to track the computational graph and the invocations of kernels. + """ + self._record_kernel_and_get_invocation(kernel, upstreams, label) + + def record_pod_invocation( + self, pod: dp.Pod, upstreams: tuple[dp.Stream, ...], label: str | None = None + ) -> None: + """ + Record the output stream of a pod invocation in the tracker. + """ + invocation = self._record_kernel_and_get_invocation(pod, upstreams, label) + self.invocation_to_pod_lut[invocation] = pod + + def reset(self) -> dict[dp.Kernel, list[dp.Stream]]: + """ + Reset the tracker and return the recorded invocations. 
+ """ + recorded_streams = self.kernel_to_invoked_stream_lut + self.kernel_to_invoked_stream_lut = defaultdict(list) + return recorded_streams + + def generate_graph(self) -> "nx.DiGraph": + import networkx as nx + + G = nx.DiGraph() + + # Add edges for each invocation + for invocation in self.kernel_invocations: + G.add_node(invocation) + for upstream_invocation in invocation.parents(): + G.add_edge(upstream_invocation, invocation) + return G + + # def generate_namemap(self) -> dict[Invocation, str]: + # namemap = {} + # for kernel, invocations in self.invocation_lut.items(): + # # if only one entry present, use the kernel name alone + # if kernel.label is not None: + # node_label = kernel.label + # else: + # node_label = str(kernel) + # if len(invocations) == 1: + # namemap[invocations[0]] = node_label + # continue + # # if multiple entries, use the kernel name and index + # for idx, invocation in enumerate(invocations): + # namemap[invocation] = f"{node_label}_{idx}" + # return namemap + + # def draw_graph(self): + # import networkx as nx + # import matplotlib.pyplot as plt + + # G = self.generate_graph() + # labels = self.generate_namemap() + + # pos = nx.drawing.nx_agraph.graphviz_layout(G, prog="dot") + # nx.draw( + # G, + # pos, + # labels=labels, + # node_size=2000, + # node_color="lightblue", + # with_labels=True, + # font_size=10, + # font_weight="bold", + # arrowsize=20, + # ) + # plt.tight_layout() + + +DEFAULT_TRACKER_MANAGER = BasicTrackerManager() diff --git a/src/orcapod/dj/mapper.py b/src/orcapod/dj/mapper.py index a38fdaf..efec07c 100644 --- a/src/orcapod/dj/mapper.py +++ b/src/orcapod/dj/mapper.py @@ -1,18 +1,18 @@ import warnings from typing import Optional -from orcapod.mappers import Join, MapPackets, Mapper, MapTags +from orcapod.core.operators import Join, MapPackets, MapTags, Operator from .operation import QueryOperation from .stream import QueryStream -class QueryMapper(QueryOperation, Mapper): +class QueryMapper(QueryOperation, Operator): """ A special type of mapper that returns and works with QueryStreams """ -def convert_to_query_mapper(operation: Mapper) -> QueryMapper: +def convert_to_query_mapper(operation: Operator) -> QueryMapper: """ Convert a generic mapper to an equivalent, Query mapper """ diff --git a/src/orcapod/dj/operation.py b/src/orcapod/dj/operation.py index d4d5a81..70b218e 100644 --- a/src/orcapod/dj/operation.py +++ b/src/orcapod/dj/operation.py @@ -1,8 +1,8 @@ -from ..base import Operation +from orcapod.core.base import Kernel from .stream import QueryStream -class QueryOperation(Operation): +class QueryOperation(Kernel): """ A special type of operation that returns and works with QueryStreams diff --git a/src/orcapod/dj/pod.py b/src/orcapod/dj/pod.py index 815b2dc..7101090 100644 --- a/src/orcapod/dj/pod.py +++ b/src/orcapod/dj/pod.py @@ -5,7 +5,7 @@ from datajoint import Schema from datajoint.table import Table -from ..pod import FunctionPod, Pod +from orcapod.core.pod import FunctionPod, Pod from ..utils.name import pascal_to_snake, snake_to_pascal from .mapper import JoinQuery from .operation import QueryOperation diff --git a/src/orcapod/dj/source.py b/src/orcapod/dj/source.py index 8af3f23..0eaa6dc 100644 --- a/src/orcapod/dj/source.py +++ b/src/orcapod/dj/source.py @@ -6,12 +6,12 @@ from orcapod.hashing import hash_to_uuid -from orcapod.sources import Source -from orcapod.streams import SyncStream -from ..utils.name import pascal_to_snake, snake_to_pascal -from ..utils.stream_utils import common_elements -from .operation import 
QueryOperation -from .stream import QueryStream, TableCachedStream, TableStream +from orcapod.core.sources import Source +from orcapod.core.streams import SyncStream +from orcapod.utils.name import pascal_to_snake, snake_to_pascal +from orcapod.utils.stream_utils import common_elements +from orcapod.dj.operation import QueryOperation +from orcapod.dj.stream import QueryStream, TableCachedStream, TableStream logger = logging.getLogger(__name__) diff --git a/src/orcapod/dj/stream.py b/src/orcapod/dj/stream.py index 3e4eb08..e8e7195 100644 --- a/src/orcapod/dj/stream.py +++ b/src/orcapod/dj/stream.py @@ -5,7 +5,7 @@ from datajoint.expression import QueryExpression from datajoint.table import Table -from orcapod.streams import SyncStream +from orcapod.core.streams import SyncStream logger = logging.getLogger(__name__) diff --git a/src/orcapod/dj/tracker.py b/src/orcapod/dj/tracker.py index b137e54..3276ba9 100644 --- a/src/orcapod/dj/tracker.py +++ b/src/orcapod/dj/tracker.py @@ -6,10 +6,10 @@ import networkx as nx from datajoint import Schema -from orcapod.base import Operation, Source -from orcapod.mappers import Mapper, Merge -from orcapod.pod import FunctionPod -from orcapod.pipeline import GraphTracker +from orcapod.core.base import Kernel, Source +from orcapod.core.operators import Operator, Merge +from orcapod.core.pod import FunctionPod +from orcapod.core.tracker import GraphTracker from .mapper import convert_to_query_mapper from .operation import QueryOperation @@ -19,7 +19,7 @@ def convert_to_query_operation( - operation: Operation, + operation: Kernel, schema: Schema, table_name: str = None, table_postfix: str = "", @@ -68,7 +68,7 @@ def convert_to_query_operation( True, ) - if isinstance(operation, Mapper): + if isinstance(operation, Operator): return convert_to_query_mapper(operation), True # operation conversion is not supported, raise an error @@ -102,7 +102,7 @@ def generate_tables( for invocation in nx.topological_sort(G): streams = [edge_lut.get(stream, stream) for stream in invocation.streams] new_node, converted = convert_to_query_operation( - invocation.operation, + invocation.kernel, schema, table_name=None, table_postfix=invocation.content_hash_int(), diff --git a/src/orcapod/errors.py b/src/orcapod/errors.py new file mode 100644 index 0000000..b1566cd --- /dev/null +++ b/src/orcapod/errors.py @@ -0,0 +1,5 @@ +class InputValidationError(Exception): + """ + Exception raised when the inputs are not valid. + This is used to indicate that the inputs do not meet the requirements of the operator. 
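+
+    Example (illustrative):
+
+        if "x" not in packet:
+            raise InputValidationError("packet is missing required key 'x'")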
+ """ diff --git a/src/orcapod/hashing/__init__.py b/src/orcapod/hashing/__init__.py index 98a15da..eb94afe 100644 --- a/src/orcapod/hashing/__init__.py +++ b/src/orcapod/hashing/__init__.py @@ -1,37 +1,15 @@ -from .core import ( - HashableMixin, - function_content_hash, - get_function_signature, - hash_file, - hash_function, - hash_packet, - hash_pathset, - hash_to_hex, - hash_to_int, - hash_to_uuid, -) from .defaults import ( - get_default_composite_file_hasher, get_default_object_hasher, get_default_arrow_hasher, ) -from .types import ( - FileHasher, - PacketHasher, - ArrowHasher, - ObjectHasher, - StringCacher, - FunctionInfoExtractor, - CompositeFileHasher, -) + __all__ = [ - "FileHasher", - "PacketHasher", - "ArrowHasher", + "FileContentHasher", + "LegacyPacketHasher", "StringCacher", "ObjectHasher", - "CompositeFileHasher", + "LegacyCompositeFileHasher", "FunctionInfoExtractor", "hash_file", "hash_pathset", @@ -46,4 +24,5 @@ "get_default_composite_file_hasher", "get_default_object_hasher", "get_default_arrow_hasher", + "ContentIdentifiableBase", ] diff --git a/src/orcapod/hashing/semantic_arrow_hasher.py b/src/orcapod/hashing/arrow_hashers.py similarity index 58% rename from src/orcapod/hashing/semantic_arrow_hasher.py rename to src/orcapod/hashing/arrow_hashers.py index f3682ed..264caad 100644 --- a/src/orcapod/hashing/semantic_arrow_hasher.py +++ b/src/orcapod/hashing/arrow_hashers.py @@ -1,87 +1,39 @@ import hashlib -import os -from typing import Any, Protocol -from abc import ABC, abstractmethod +from typing import Any import pyarrow as pa -import pyarrow.ipc as ipc -from io import BytesIO -import polars as pl +import json +from orcapod.protocols.hashing_protocols import SemanticTypeHasher, StringCacher +from orcapod.hashing import arrow_serialization +from collections.abc import Callable +SERIALIZATION_METHOD_LUT: dict[str, Callable[[pa.Table], bytes]] = { + "logical": arrow_serialization.serialize_table_logical, +} -class SemanticTypeHasher(Protocol): - """Abstract base class for semantic type-specific hashers.""" - @abstractmethod - def hash_column(self, column: pa.Array) -> bytes: - """Hash a column with this semantic type and return the hash bytes.""" - pass - - -class PathHasher(SemanticTypeHasher): - """Hasher for Path semantic type columns - hashes file contents.""" - - def __init__(self, chunk_size: int = 8192, handle_missing: str = "error"): - """ - Initialize PathHasher. - - Args: - chunk_size: Size of chunks to read files in bytes - handle_missing: How to handle missing files ('error', 'skip', 'null_hash') - """ - self.chunk_size = chunk_size - self.handle_missing = handle_missing - - def _hash_file_content(self, file_path: str) -> str: - """Hash the content of a single file and return hex string.""" - import os - - try: - if not os.path.exists(file_path): - if self.handle_missing == "error": - raise FileNotFoundError(f"File not found: {file_path}") - elif self.handle_missing == "skip": - return hashlib.sha256(b"").hexdigest() - elif self.handle_missing == "null_hash": - return hashlib.sha256(b"").hexdigest() - - hasher = hashlib.sha256() - - # Read file in chunks to handle large files efficiently - with open(file_path, "rb") as f: - while chunk := f.read(self.chunk_size): - hasher.update(chunk) - - return hasher.hexdigest() +def serialize_pyarrow_table(table: pa.Table) -> str: + """ + Serialize a PyArrow table to a stable JSON string by converting to dictionary of lists. 
- except (IOError, OSError, PermissionError) as e: - if self.handle_missing == "error": - raise IOError(f"Cannot read file {file_path}: {e}") - else: # skip or null_hash - error_msg = f"" - return hashlib.sha256(error_msg.encode("utf-8")).hexdigest() + Args: + table: PyArrow table to serialize - def hash_column(self, column: pa.Array) -> pa.Array: - """ - Replace path column with file content hashes. - Returns a new array where each path is replaced with its file content hash. - """ + Returns: + JSON string representation with sorted keys and no whitespace + """ + # Convert table to dictionary of lists using to_pylist() + data_dict = {} - # Convert to python list for processing - paths = column.to_pylist() - - # Hash each file's content individually - content_hashes = [] - for path in paths: - if path is not None: - # Normalize path for consistency - normalized_path = os.path.normpath(str(path)) - file_content_hash = self._hash_file_content(normalized_path) - content_hashes.append(file_content_hash) - else: - content_hashes.append(None) # Preserve nulls + for column_name in table.column_names: + # Convert Arrow column to Python list, which visits all elements + data_dict[column_name] = table.column(column_name).to_pylist() - # Return new array with content hashes instead of paths - return pa.array(content_hashes) + # Serialize to JSON with sorted keys and no whitespace + return json.dumps( + data_dict, + separators=(",", ":"), + sort_keys=True, + ) class SemanticArrowHasher: @@ -95,7 +47,16 @@ class SemanticArrowHasher: 4. Computes final hash of the processed packet """ - def __init__(self, chunk_size: int = 8192, handle_missing: str = "error"): + def __init__( + self, + hash_algorithm: str = "sha256", + semantic_type_hashers: dict[str, SemanticTypeHasher] | None = None, + chunk_size: int = 8192, + hasher_id: str | None = None, + handle_missing: str = "error", + serialization_method: str = "logical", + # TODO: consider passing options for serialization method + ): """ Initialize SemanticArrowHasher. @@ -103,9 +64,37 @@ def __init__(self, chunk_size: int = 8192, handle_missing: str = "error"): chunk_size: Size of chunks to read files in bytes handle_missing: How to handle missing files ('error', 'skip', 'null_hash') """ + if hasher_id is None: + hasher_id = f"semantic_arrow_hasher:{hash_algorithm}:{serialization_method}" + self._hasher_id = hasher_id self.chunk_size = chunk_size self.handle_missing = handle_missing - self.semantic_type_hashers: dict[str, SemanticTypeHasher] = {} + self.semantic_type_hashers: dict[str, SemanticTypeHasher] = ( + semantic_type_hashers or {} + ) + self.hash_algorithm = hash_algorithm + if serialization_method not in SERIALIZATION_METHOD_LUT: + raise ValueError( + f"Invalid serialization method '{serialization_method}'. " + f"Supported methods: {list(SERIALIZATION_METHOD_LUT.keys())}" + ) + self.serialization_method = serialization_method + self._serialize_arrow_table = SERIALIZATION_METHOD_LUT[serialization_method] + + def set_cacher(self, semantic_type: str, cacher: StringCacher) -> None: + """ + Add a string cacher for caching hash values. + + This is a no-op for SemanticArrowHasher since it hashes column contents directly. 
+ """ + if semantic_type in self.semantic_type_hashers: + self.semantic_type_hashers[semantic_type].set_cacher(cacher) + else: + raise KeyError(f"No hasher registered for semantic type '{semantic_type}'") + + @property + def hasher_id(self) -> str: + return self._hasher_id def register_semantic_hasher(self, semantic_type: str, hasher: SemanticTypeHasher): """Register a custom hasher for a semantic type.""" @@ -125,7 +114,11 @@ def _get_semantic_type(self, field: pa.Field) -> str | None: return None def _create_hash_column( - self, original_column: pa.Array, hash_bytes: bytes, original_field: pa.Field + self, + original_column: pa.Array, + hash_bytes: bytes, + original_field: pa.Field, + hash_algorithm: str | None = None, ) -> tuple[pa.Array, pa.Field]: """Create a new column containing the hash bytes.""" # Create array of hash bytes (one hash value repeated for each row) @@ -138,11 +131,11 @@ def _create_hash_column( "semantic_type", "unknown" ) new_metadata["semantic_type"] = "hash" - new_metadata["hash_algorithm"] = "sha256" + new_metadata["hash_algorithm"] = hash_algorithm or self.hasher_id new_field = pa.field( original_field.name, - pa.string(), # Hash stored as string + pa.large_string(), # Hash stored as large string nullable=original_field.nullable, metadata=new_metadata, ) @@ -193,23 +186,23 @@ def _sort_table_columns(self, table: pa.Table) -> pa.Table: sorted_schema = pa.schema(sorted_fields) return pa.table(sorted_columns, schema=sorted_schema) - def _serialize_table_ipc(self, table: pa.Table) -> bytes: - """Serialize table using Arrow IPC format for stable binary representation.""" - buffer = BytesIO() + # def _serialize_table_ipc(self, table: pa.Table) -> bytes: + # # TODO: fix and use logical table hashing instead + # """Serialize table using Arrow IPC format for stable binary representation.""" + # buffer = BytesIO() - # Use IPC stream format for deterministic serialization - with ipc.new_stream(buffer, table.schema) as writer: - writer.write_table(table) + # # Use IPC stream format for deterministic serialization + # with ipc.new_stream(buffer, table.schema) as writer: + # writer.write_table(table) - return buffer.getvalue() + # return buffer.getvalue() - def hash_table(self, table: pa.Table, algorithm: str = "sha256") -> str: + def hash_table(self, table: pa.Table, prefix_hasher_id: bool = True) -> str: """ Compute stable hash of Arrow table. Args: table: Arrow table to hash - algorithm: Hash algorithm to use ('sha256', 'md5', etc.) Returns: Hex string of the computed hash @@ -223,20 +216,24 @@ def hash_table(self, table: pa.Table, algorithm: str = "sha256") -> str: # normalize all string to large strings by passing through polars # TODO: consider cleaner approach in the future + import polars as pl + sorted_table = pl.DataFrame(sorted_table).to_arrow() # Step 3: Serialize using Arrow IPC format - serialized_bytes = self._serialize_table_ipc(sorted_table) + serialized_bytes = self._serialize_arrow_table(sorted_table) # Step 4: Compute final hash - hasher = hashlib.new(algorithm) + hasher = hashlib.new(self.hash_algorithm) hasher.update(serialized_bytes) - return hasher.hexdigest() + hash_str = hasher.hexdigest() + if prefix_hasher_id: + hash_str = f"{self.hasher_id}@{hash_str}" + + return hash_str - def hash_table_with_metadata( - self, table: pa.Table, algorithm: str = "sha256" - ) -> dict[str, Any]: + def hash_table_with_metadata(self, table: pa.Table) -> dict[str, Any]: """ Compute hash with additional metadata about the process. 
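+
+        Example of the returned structure (illustrative values; the hash string is
+        prefixed with this hasher's hasher_id):
+
+            {
+                "hash": "semantic_arrow_hasher:sha256:logical@3fa2...",
+                "num_rows": 10,
+                "num_columns": 3,
+                "processed_columns": [...],
+            }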
@@ -257,11 +254,10 @@ def hash_table_with_metadata( processed_columns.append(column_info) # Compute hash - table_hash = self.hash_table(table, algorithm) + table_hash = self.hash_table(table) return { "hash": table_hash, - "algorithm": algorithm, "num_rows": len(table), "num_columns": len(table.schema), "processed_columns": processed_columns, diff --git a/src/orcapod/hashing/arrow_serialization.py b/src/orcapod/hashing/arrow_serialization.py new file mode 100644 index 0000000..fa0500f --- /dev/null +++ b/src/orcapod/hashing/arrow_serialization.py @@ -0,0 +1,1246 @@ +import pyarrow as pa +import pyarrow.compute as pc +from io import BytesIO +import struct +from typing import Any +import hashlib + + +def bool_sequence_to_byte(sequence: list[bool]) -> bytes: + """Convert a sequence of booleans to a byte array.""" + if len(sequence) > 8: + raise ValueError("Sequence length exceeds 8 bits, cannot fit in a byte.") + mask = 1 + flags = 0 + for value in sequence: + if value: + flags |= mask + mask <<= 1 + return struct.pack(" bytes: + """Serialize order options to bytes for inclusion in format.""" + flags = 0 + if self.ignore_column_order: + flags |= 1 + if self.ignore_row_order: + flags |= 2 + return struct.pack(" "OrderOptions": + """Deserialize order options from bytes.""" + flags = struct.unpack(" pa.Array: + """ + Convert any Arrow array to string representation for sorting purposes. + Handles all data types including complex ones. + """ + if pa.types.is_string(array.type) or pa.types.is_large_string(array.type): + # Already string + return array + + elif pa.types.is_binary(array.type) or pa.types.is_large_binary(array.type): + # Convert binary to base64 string representation for deterministic sorting + try: + # Use Arrow's base64 encoding if available + import base64 + + str_values = [] + # Get null mask + null_mask = pc.is_null(array) # type: ignore + for i in range(len(array)): + if null_mask[i].as_py(): + str_values.append(None) # Will be handled by fill_null later + else: + binary_val = array[i].as_py() + if binary_val is not None: + str_values.append(base64.b64encode(binary_val).decode("ascii")) + else: + str_values.append(None) + return pa.array(str_values, type=pa.string()) + except Exception: + # Fallback: convert to hex string + str_values = [] + try: + null_mask = pc.is_null(array) # type: ignore + for i in range(len(array)): + if null_mask[i].as_py(): + str_values.append(None) + else: + try: + binary_val = array[i].as_py() + if binary_val is not None: + str_values.append(binary_val.hex()) + else: + str_values.append(None) + except Exception: + str_values.append(f"BINARY_{i}") + except Exception: + # If null checking fails, just convert all values + for i in range(len(array)): + try: + binary_val = array[i].as_py() + if binary_val is not None: + str_values.append(binary_val.hex()) + else: + str_values.append(None) + except Exception: + str_values.append(f"BINARY_{i}") + return pa.array(str_values, type=pa.string()) + + elif _is_primitive_type(array.type): + # Convert primitive types to string + try: + return pc.cast(array, pa.string()) + except Exception: + # Manual conversion for types that don't cast well + str_values = [] + try: + null_mask = pc.is_null(array) # type: ignore + for i in range(len(array)): + if null_mask[i].as_py(): + str_values.append(None) + else: + try: + value = array[i].as_py() + str_values.append(str(value)) + except Exception: + str_values.append(f"PRIMITIVE_{i}") + except Exception: + # If null checking fails, just convert all values + for i in 
range(len(array)): + try: + value = array[i].as_py() + if value is not None: + str_values.append(str(value)) + else: + str_values.append(None) + except Exception: + str_values.append(f"PRIMITIVE_{i}") + return pa.array(str_values, type=pa.string()) + + elif pa.types.is_list(array.type) or pa.types.is_large_list(array.type): + # Convert list to string representation + str_values = [] + try: + null_mask = pc.is_null(array) # type: ignore + for i in range(len(array)): + if null_mask[i].as_py(): + str_values.append(None) + else: + try: + value = array[i].as_py() + # Sort list elements for consistent representation + if value is not None: + sorted_value = sorted( + value, key=lambda x: (x is None, str(x)) + ) + str_values.append(str(sorted_value)) + else: + str_values.append(None) + except Exception: + str_values.append(f"LIST_{i}") + except Exception: + # If null checking fails, just convert all values + for i in range(len(array)): + try: + value = array[i].as_py() + if value is not None: + sorted_value = sorted(value, key=lambda x: (x is None, str(x))) + str_values.append(str(sorted_value)) + else: + str_values.append(None) + except Exception: + str_values.append(f"LIST_{i}") + return pa.array(str_values, type=pa.string()) + + elif pa.types.is_struct(array.type): + # Convert struct to string representation + str_values = [] + try: + null_mask = pc.is_null(array) # type: ignore + for i in range(len(array)): + if null_mask[i].as_py(): + str_values.append(None) + else: + try: + value = array[i].as_py() + if value is not None: + # Sort dict keys for consistent representation + if isinstance(value, dict): + sorted_items = sorted( + value.items(), key=lambda x: str(x[0]) + ) + str_values.append(str(dict(sorted_items))) + else: + str_values.append(str(value)) + else: + str_values.append(None) + except Exception: + str_values.append(f"STRUCT_{i}") + except Exception: + # If null checking fails, just convert all values + for i in range(len(array)): + try: + value = array[i].as_py() + if value is not None: + if isinstance(value, dict): + sorted_items = sorted( + value.items(), key=lambda x: str(x[0]) + ) + str_values.append(str(dict(sorted_items))) + else: + str_values.append(str(value)) + else: + str_values.append(None) + except Exception: + str_values.append(f"STRUCT_{i}") + return pa.array(str_values, type=pa.string()) + + elif pa.types.is_dictionary(array.type): + # Convert dictionary to string representation using the decoded values + str_values = [] + try: + null_mask = pc.is_null(array) # type: ignore + for i in range(len(array)): + if null_mask[i].as_py(): + str_values.append(None) + else: + try: + value = array[i].as_py() + str_values.append(str(value)) + except Exception: + str_values.append(f"DICT_{i}") + except Exception: + # If null checking fails, just convert all values + for i in range(len(array)): + try: + value = array[i].as_py() + if value is not None: + str_values.append(str(value)) + else: + str_values.append(None) + except Exception: + str_values.append(f"DICT_{i}") + return pa.array(str_values, type=pa.string()) + + else: + # Generic fallback for any other types + try: + return pc.cast(array, pa.string()) + except Exception: + # Manual conversion as last resort + str_values = [] + try: + null_mask = pc.is_null(array) # type: ignore + for i in range(len(array)): + if null_mask[i].as_py(): + str_values.append(None) + else: + try: + value = array[i].as_py() + str_values.append(str(value)) + except Exception: + str_values.append(f"UNKNOWN_{array.type}_{i}") + except Exception: + # If 
null checking fails, just convert all values + for i in range(len(array)): + try: + value = array[i].as_py() + if value is not None: + str_values.append(str(value)) + else: + str_values.append(None) + except Exception: + str_values.append(f"UNKNOWN_{array.type}_{i}") + return pa.array(str_values, type=pa.string()) + + +def _create_row_sort_key(table: pa.Table) -> pa.Array: + """ + Create a deterministic sort key for rows by combining all column values. + This ensures consistent row ordering regardless of input order. + """ + if table.num_rows == 0: + return pa.array([], type=pa.string()) + + # Convert each column to string representation for sorting + sort_components = [] + + for i in range(table.num_columns): + column = table.column(i) + field = table.schema.field(i) + + # Combine all chunks into a single array + if column.num_chunks > 1: + combined_array = pa.concat_arrays(column.chunks) + elif column.num_chunks == 1: + combined_array = column.chunk(0) + else: + combined_array = pa.array([], type=field.type) + + # Convert to string representation for sorting + str_array = _convert_array_to_string_for_sorting(combined_array) + + # Handle nulls by replacing with a consistent null representation + str_array = pc.fill_null(str_array, "NULL") + sort_components.append(str_array) + + # Combine all columns into a single sort key + if len(sort_components) == 1: + return sort_components[0] + else: + # Concatenate all string representations with separators + separator = pa.scalar("||") + combined = sort_components[0] + for component in sort_components[1:]: + combined = pc.binary_join_element_wise(combined, separator, component) # type: ignore + return combined + + +def _sort_table_by_content(table: pa.Table) -> pa.Table: + """Sort table rows based on content for deterministic ordering.""" + if table.num_rows <= 1: + return table + + # Create sort key + sort_key = _create_row_sort_key(table) + + # Get sort indices + sort_indices = pc.sort_indices(sort_key) # type: ignore + + # Apply sort to table + return pc.take(table, sort_indices) + + +def _sort_table_columns_by_name(table: pa.Table) -> pa.Table: + """Sort table columns alphabetically by name for deterministic ordering.""" + if table.num_columns <= 1: + return table + + # Get column names and sort them + column_names = [field.name for field in table.schema] + sorted_names = sorted(column_names) + + # If already sorted, return as-is + if column_names == sorted_names: + return table + + # Reorder columns + return table.select(sorted_names) + + +def serialize_table_logical( + table: pa.Table, order_options: OrderOptions | None = None +) -> bytes: + """ + Serialize table using column-wise processing with direct binary data access. + + This implementation works directly with Arrow's underlying binary buffers + without converting to Python objects, making it much faster and more + memory efficient while maintaining high repeatability. 
+ + Args: + table: PyArrow table to serialize + order_options: Options for handling column and row order independence + """ + if order_options is None: + order_options = OrderOptions() + + buffer = BytesIO() + + # Write format version + buffer.write(b"ARROW_BINARY_V1") # Updated version to include order options + + # Write order options + buffer.write(order_options.to_bytes()) + + # Apply ordering transformations if requested + processed_table = table + + if order_options.ignore_column_order: + processed_table = _sort_table_columns_by_name(processed_table) + + if order_options.ignore_row_order: + processed_table = _sort_table_by_content(processed_table) + + # Serialize schema deterministically + _serialize_schema_deterministic(buffer, processed_table.schema) + + # Process each column using direct binary access + column_digests = [] + for i in range(processed_table.num_columns): + column = processed_table.column(i) + field = processed_table.schema.field(i) + column_digest = _serialize_column_binary(column, field) + column_digests.append(column_digest) + + # Combine column digests + for digest in column_digests: + buffer.write(digest) + + return buffer.getvalue() + + +def _serialize_schema_deterministic(buffer: BytesIO, schema: pa.Schema) -> None: + """Serialize schema information deterministically.""" + buffer.write(struct.pack(" None: + """Serialize Arrow data type deterministically.""" + type_id = data_type.id + buffer.write(struct.pack(" bytes: + """ + Serialize column using direct binary buffer access. + + To ensure chunking independence, we combine chunks into a single array + before processing. This ensures identical output regardless of chunk boundaries. + """ + buffer = BytesIO() + + # Combine all chunks into a single array for consistent processing + if column.num_chunks > 1: + # Multiple chunks - combine them using pa.concat_arrays + combined_array = pa.concat_arrays(column.chunks) + elif column.num_chunks == 1: + # Single chunk - use directly + combined_array = column.chunk(0) + else: + # No chunks - create empty array + combined_array = pa.array([], type=field.type) + + # Process the combined array + chunk_result = _serialize_array_binary(combined_array, field.type) + buffer.write(chunk_result) + + return buffer.getvalue() + + +def _serialize_array_binary(array: pa.Array, data_type: pa.DataType) -> bytes: + """Serialize array using direct access to Arrow's binary buffers.""" + buffer = BytesIO() + + # Get validity buffer (null bitmap) if it exists + validity_buffer = None + if array.buffers()[0] is not None: + validity_buffer = array.buffers()[0] + + # Process based on Arrow type, accessing buffers directly + try: + if _is_primitive_type(data_type): + _serialize_primitive_array_binary(buffer, array, data_type, validity_buffer) + + elif pa.types.is_string(data_type) or pa.types.is_large_string(data_type): + _serialize_string_array_binary(buffer, array, data_type, validity_buffer) + + elif pa.types.is_binary(data_type) or pa.types.is_large_binary(data_type): + _serialize_binary_array_binary(buffer, array, data_type, validity_buffer) + + elif pa.types.is_list(data_type) or pa.types.is_large_list(data_type): + _serialize_list_array_binary(buffer, array, data_type, validity_buffer) + + elif pa.types.is_struct(data_type): + _serialize_struct_array_binary(buffer, array, data_type, validity_buffer) + + elif pa.types.is_dictionary(data_type): + _serialize_dictionary_array_binary( + buffer, array, data_type, validity_buffer + ) + + else: + # Fallback to element-wise processing for complex 
types + _serialize_array_fallback(buffer, array, data_type, validity_buffer) + + except Exception as e: + # If binary serialization fails, fall back to element-wise processing + print( + f"Warning: Binary serialization failed for {data_type}, falling back to element-wise: {e}" + ) + buffer = BytesIO() # Reset buffer + _serialize_array_fallback(buffer, array, data_type, validity_buffer) + + return buffer.getvalue() + + +def _is_primitive_type(data_type: pa.DataType) -> bool: + """Check if type can be processed as primitive (fixed-size) data.""" + return ( + pa.types.is_integer(data_type) + or pa.types.is_floating(data_type) + or pa.types.is_boolean(data_type) + or pa.types.is_date(data_type) + or pa.types.is_time(data_type) + or pa.types.is_timestamp(data_type) + ) + + +def _serialize_primitive_array_binary( + buffer: BytesIO, array: pa.Array, data_type: pa.DataType, validity_buffer +): + """Serialize primitive arrays by directly copying binary data.""" + # Write validity bitmap + _serialize_validity_buffer(buffer, validity_buffer) + + # Get data buffer (buffer[1] for primitive types) + data_buffer = array.buffers()[1] + if data_buffer is not None: + # For primitive types, copy the buffer directly + if pa.types.is_boolean(data_type): + # Boolean needs the length for bit interpretation + buffer.write(struct.pack(" 0: + child_array = array.children[0] + + # Method 3: Try accessing via flatten() for some list types + elif hasattr(array, "flatten"): + try: + child_array = array.flatten() + except Exception: + pass + + # Recursively serialize child array + if child_array is not None: + child_data = _serialize_array_binary(child_array, data_type.value_type) + buffer.write(child_data) + else: + # If we can't access child arrays directly, fall back to element-wise processing + _serialize_array_fallback(buffer, array, data_type, validity_buffer) + + +def _serialize_struct_array_binary( + buffer: BytesIO, array: pa.Array, data_type: pa.DataType, validity_buffer +): + """Serialize struct arrays by processing child arrays.""" + # Write validity bitmap + _serialize_validity_buffer(buffer, validity_buffer) + + # Get child arrays - handle different access patterns for StructArray + child_arrays = [] + if hasattr(array, "field"): + # StructArray uses .field(i) to access child arrays + for i in range(len(data_type)): + child_arrays.append(array.field(i)) + elif hasattr(array, "children") and array.children: + # Some array types use .children + child_arrays = array.children + else: + # Fallback: try to access fields by iterating + try: + for i in range(len(data_type)): + child_arrays.append(array.field(i)) + except (AttributeError, IndexError): + # If all else fails, use element-wise processing + _serialize_array_fallback(buffer, array, data_type, validity_buffer) + return + + # Serialize each child field + for i, child_array in enumerate(child_arrays): + field_type = data_type[i].type + child_data = _serialize_array_binary(child_array, field_type) + buffer.write(child_data) + + +def _serialize_dictionary_array_binary( + buffer: BytesIO, array: pa.Array, data_type: pa.DataType, validity_buffer +): + """Serialize dictionary arrays using indices + dictionary.""" + # Write validity bitmap + _serialize_validity_buffer(buffer, validity_buffer) + + # Serialize indices array + indices_data = _serialize_array_binary(array.indices, data_type.index_type) + buffer.write(indices_data) + + # Serialize dictionary array + dict_data = _serialize_array_binary(array.dictionary, data_type.value_type) + buffer.write(dict_data) 
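+
+
+# Note: each per-type serializer above follows the same pattern: write the
+# validity bitmap first, then the type-specific payload, recursing into child
+# arrays for nested types. A rough usage sketch of the public entry point
+# serialize_table_logical (defined earlier in this module):
+#
+#     t = pa.table({"a": [1, 2, None], "b": ["x", "y", "z"]})
+#     payload = serialize_table_logical(t)  # deterministic bytes
+#     assert payload == serialize_table_logical(t.combine_chunks())  # chunk-independent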
+ + +def _serialize_validity_buffer(buffer: BytesIO, validity_buffer): + """Serialize validity (null) bitmap.""" + if validity_buffer is not None: + # Copy validity bitmap directly + buffer.write(validity_buffer.to_pybytes()) + # If no validity buffer, there are no nulls (implicit) + + +def _serialize_array_fallback( + buffer: BytesIO, array: pa.Array, data_type: pa.DataType, validity_buffer +): + """Fallback to element-wise processing for complex types.""" + # Write validity bitmap + _serialize_validity_buffer(buffer, validity_buffer) + + # Process element by element + for i in range(len(array)): + try: + null_mask = pc.is_null(array) # type: ignore + is_null = null_mask[i].as_py() + except: + # Fallback null check + try: + value = array[i].as_py() + is_null = value is None + except: + is_null = False + + if is_null: + buffer.write(b"\x00") + else: + buffer.write(b"\x01") + + # For complex nested types, convert to Python and serialize + try: + value = array[i].as_py() + _serialize_complex_value(buffer, value, data_type) + except Exception as e: + # If .as_py() fails, try alternative approaches + try: + # For some array types, we can access scalar values differently + scalar = array[i] + if hasattr(scalar, "value"): + value = scalar.value + else: + value = str(scalar) # Convert to string as last resort + _serialize_complex_value(buffer, value, data_type) + except Exception: + # Absolute fallback - serialize type name and index + fallback_str = f"{data_type}[{i}]" + fallback_bytes = fallback_str.encode("utf-8") + buffer.write(struct.pack(" str: + """Create deterministic hash using binary serialization.""" + serialized = serialize_table_logical(table, order_options) + + if algorithm == "sha256": + hasher = hashlib.sha256() + elif algorithm == "sha3_256": + hasher = hashlib.sha3_256() + elif algorithm == "blake2b": + hasher = hashlib.blake2b() + else: + raise ValueError(f"Unsupported hash algorithm: {algorithm}") + + hasher.update(serialized) + return hasher.hexdigest() + + +def serialize_table_logical_streaming( + table: pa.Table, order_options: OrderOptions | None = None +) -> str: + """ + Memory-efficient streaming version that produces the same hash as serialize_table_logical_hash. + + This version processes data in streaming fashion but maintains the same logical structure + as the non-streaming version to ensure identical hashes and chunking independence. 
+ """ + if order_options is None: + order_options = OrderOptions() + + hasher = hashlib.sha256() + + # Hash format version (same as non-streaming) + hasher.update(b"ARROW_BINARY_V1") + + # Hash order options + hasher.update(order_options.to_bytes()) + + # Apply ordering transformations if requested + processed_table = table + + if order_options.ignore_column_order: + processed_table = _sort_table_columns_by_name(processed_table) + + if order_options.ignore_row_order: + processed_table = _sort_table_by_content(processed_table) + + # Hash schema (same as non-streaming) + schema_buffer = BytesIO() + _serialize_schema_deterministic(schema_buffer, processed_table.schema) + hasher.update(schema_buffer.getvalue()) + + # Process each column using the same logic as non-streaming + for i in range(processed_table.num_columns): + column = processed_table.column(i) + field = processed_table.schema.field(i) + + # Use the same column serialization logic for chunking independence + column_data = _serialize_column_binary(column, field) + + # Hash the column data + hasher.update(column_data) + + return hasher.hexdigest() + + +# IPC serialization for comparison (updated to include order options for fair comparison) +def serialize_table_ipc( + table: pa.Table, order_options: OrderOptions | None = None +) -> bytes: + """Serialize table using Arrow IPC format for comparison.""" + from io import BytesIO + import pyarrow.ipc as ipc + + if order_options is None: + order_options = OrderOptions() + + buffer = BytesIO() + + # Add format version for consistency with logical serialization + buffer.write(b"ARROW_IPC_V2") + + # Add order options + buffer.write(order_options.to_bytes()) + + # Apply ordering transformations if requested + processed_table = table + + if order_options.ignore_column_order: + processed_table = _sort_table_columns_by_name(processed_table) + + if order_options.ignore_row_order: + processed_table = _sort_table_by_content(processed_table) + + # Standard IPC serialization + ipc_buffer = BytesIO() + with ipc.new_stream(ipc_buffer, processed_table.schema) as writer: + writer.write_table(processed_table) + + # Append IPC data + buffer.write(ipc_buffer.getvalue()) + + return buffer.getvalue() + + +# Test utilities (updated to test order independence) +def create_test_table_1(): + """Create a basic test table with various data types.""" + return pa.table( + { + "int32_col": pa.array([1, 2, None, 4, 5], type=pa.int32()), + "float64_col": pa.array([1.1, 2.2, 3.3, None, 5.5], type=pa.float64()), + "string_col": pa.array(["hello", "world", None, "arrow", "fast"]), + "bool_col": pa.array([True, False, None, True, False]), + "binary_col": pa.array([b"data1", b"data2", None, b"data4", b"data5"]), + } + ) + + +def create_test_table_reordered_columns(): + """Same data as test_table_1 but with different column order.""" + return pa.table( + { + "string_col": pa.array(["hello", "world", None, "arrow", "fast"]), + "bool_col": pa.array([True, False, None, True, False]), + "int32_col": pa.array([1, 2, None, 4, 5], type=pa.int32()), + "binary_col": pa.array([b"data1", b"data2", None, b"data4", b"data5"]), + "float64_col": pa.array([1.1, 2.2, 3.3, None, 5.5], type=pa.float64()), + } + ) + + +def create_test_table_reordered_rows(): + """Same data as test_table_1 but with different row order.""" + return pa.table( + { + "int32_col": pa.array([5, 4, None, 2, 1], type=pa.int32()), + "float64_col": pa.array([5.5, None, 3.3, 2.2, 1.1], type=pa.float64()), + "string_col": pa.array(["fast", "arrow", None, "world", "hello"]), + 
"bool_col": pa.array([False, True, None, False, True]), + "binary_col": pa.array([b"data5", b"data4", None, b"data2", b"data1"]), + } + ) + + +def create_test_table_different_types(): + """Same logical data but with different Arrow types where possible.""" + return pa.table( + { + "int32_col": pa.array( + [1, 2, None, 4, 5], type=pa.int64() + ), # int64 instead of int32 + "float64_col": pa.array( + [1.1, 2.2, 3.3, None, 5.5], type=pa.float32() + ), # float32 instead of float64 + "string_col": pa.array(["hello", "world", None, "arrow", "fast"]), + "bool_col": pa.array([True, False, None, True, False]), + "binary_col": pa.array([b"data1", b"data2", None, b"data4", b"data5"]), + } + ) + + +def create_test_table_different_chunking(): + """Same data as test_table_1 but with different chunking.""" + # Create arrays with explicit chunking + int_chunks = [ + pa.array([1, 2], type=pa.int32()), + pa.array([None, 4, 5], type=pa.int32()), + ] + float_chunks = [ + pa.array([1.1], type=pa.float64()), + pa.array([2.2, 3.3, None, 5.5], type=pa.float64()), + ] + string_chunks = [pa.array(["hello", "world"]), pa.array([None, "arrow", "fast"])] + bool_chunks = [pa.array([True, False, None]), pa.array([True, False])] + binary_chunks = [ + pa.array([b"data1"]), + pa.array([b"data2", None, b"data4", b"data5"]), + ] + + return pa.table( + { + "int32_col": pa.chunked_array(int_chunks), + "float64_col": pa.chunked_array(float_chunks), + "string_col": pa.chunked_array(string_chunks), + "bool_col": pa.chunked_array(bool_chunks), + "binary_col": pa.chunked_array(binary_chunks), + } + ) + + +def create_test_table_complex_types(): + """Create a table with complex nested types.""" + return pa.table( + { + "list_col": pa.array( + [[1, 2], [3, 4, 5], None, [], [6]], type=pa.list_(pa.int32()) + ), + "struct_col": pa.array( + [ + {"a": 1, "b": "x"}, + {"a": 2, "b": "y"}, + None, + {"a": 3, "b": "z"}, + {"a": 4, "b": "w"}, + ], + type=pa.struct([("a", pa.int32()), ("b", pa.string())]), + ), + "dict_col": pa.array( + ["apple", "banana", "apple", None, "cherry"] + ).dictionary_encode(), + } + ) + + +def run_comprehensive_tests(): + """Run comprehensive test suite for serialization with order independence.""" + import time + + print("=" * 70) + print("COMPREHENSIVE ARROW SERIALIZATION TEST SUITE (WITH ORDER OPTIONS)") + print("=" * 70) + + # Test cases + test_cases = [ + ("Basic table", create_test_table_1), + ("Reordered columns", create_test_table_reordered_columns), + ("Reordered rows", create_test_table_reordered_rows), + ("Different types", create_test_table_different_types), + ("Different chunking", create_test_table_different_chunking), + ("Complex types", create_test_table_complex_types), + ] + + # Order option combinations to test + order_configs = [ + ("Default (order-sensitive)", OrderOptions(False, False)), + ("Column-order independent", OrderOptions(True, False)), + ("Row-order independent", OrderOptions(False, True)), + ("Fully order-independent", OrderOptions(True, True)), + ] + + print("\n1. 
ORDER INDEPENDENCE TESTS") + print("-" * 50) + + base_table = create_test_table_1() + reordered_cols = create_test_table_reordered_columns() + reordered_rows = create_test_table_reordered_rows() + + for config_name, order_opts in order_configs: + print(f"\n{config_name}:") + print(f" Config: {order_opts}") + + # Test with base table + base_hash = serialize_table_logical_hash(base_table, order_options=order_opts) + cols_hash = serialize_table_logical_hash( + reordered_cols, order_options=order_opts + ) + rows_hash = serialize_table_logical_hash( + reordered_rows, order_options=order_opts + ) + + # Test streaming consistency + base_stream = serialize_table_logical_streaming( + base_table, order_options=order_opts + ) + + print(f" Base table: {base_hash[:12]}...") + print(f" Reordered columns: {cols_hash[:12]}...") + print(f" Reordered rows: {rows_hash[:12]}...") + print(f" Streaming matches: {base_hash == base_stream}") + + # Check expected behavior + cols_should_match = order_opts.ignore_column_order + rows_should_match = order_opts.ignore_row_order + + cols_match = base_hash == cols_hash + rows_match = base_hash == rows_hash + + cols_status = "✓" if cols_match == cols_should_match else "✗" + rows_status = "✓" if rows_match == rows_should_match else "✗" + + print( + f" {cols_status} Column order independence: {cols_match} (expected: {cols_should_match})" + ) + print( + f" {rows_status} Row order independence: {rows_match} (expected: {rows_should_match})" + ) + + print("\n2. CHUNKING INDEPENDENCE WITH ORDER OPTIONS") + print("-" * 50) + + original = create_test_table_1() + combined = original.combine_chunks() + different_chunking = create_test_table_different_chunking() + + for config_name, order_opts in order_configs: + orig_hash = serialize_table_logical_hash(original, order_options=order_opts) + comb_hash = serialize_table_logical_hash(combined, order_options=order_opts) + diff_hash = serialize_table_logical_hash( + different_chunking, order_options=order_opts + ) + + chunking_independent = orig_hash == comb_hash == diff_hash + status = "✓" if chunking_independent else "✗" + + print( + f"{status} {config_name:25} | Chunking independent: {chunking_independent}" + ) + + print("\n3. FORMAT VERSION COMPATIBILITY") + print("-" * 50) + + # Test that different order options produce different hashes when they should + test_table = create_test_table_1() + + hashes = {} + for config_name, order_opts in order_configs: + hash_value = serialize_table_logical_hash(test_table, order_options=order_opts) + hashes[config_name] = hash_value + print(f"{config_name:25} | {hash_value[:16]}...") + + # Verify that order-sensitive vs order-independent produce different hashes + default_hash = hashes["Default (order-sensitive)"] + col_indep_hash = hashes["Column-order independent"] + row_indep_hash = hashes["Row-order independent"] + full_indep_hash = hashes["Fully order-independent"] + + print(f"\nHash uniqueness:") + print(f" Default != Col-independent: {default_hash != col_indep_hash}") + print(f" Default != Row-independent: {default_hash != row_indep_hash}") + print(f" Default != Fully independent: {default_hash != full_indep_hash}") + + print("\n4. 
CONTENT EQUIVALENCE TEST") + print("-" * 50) + + # Create tables with same content but different presentation + table_a = pa.table({"col1": pa.array([1, 2, 3]), "col2": pa.array(["a", "b", "c"])}) + + table_b = pa.table( + { + "col2": pa.array(["a", "b", "c"]), # Different column order + "col1": pa.array([1, 2, 3]), + } + ) + + table_c = pa.table( + { + "col1": pa.array([3, 1, 2]), # Different row order + "col2": pa.array(["c", "a", "b"]), + } + ) + + table_d = pa.table( + { + "col2": pa.array(["c", "a", "b"]), # Both different + "col1": pa.array([3, 1, 2]), + } + ) + + full_indep_opts = OrderOptions(True, True) + + hash_a = serialize_table_logical_hash(table_a, order_options=full_indep_opts) + hash_b = serialize_table_logical_hash(table_b, order_options=full_indep_opts) + hash_c = serialize_table_logical_hash(table_c, order_options=full_indep_opts) + hash_d = serialize_table_logical_hash(table_d, order_options=full_indep_opts) + + all_match = hash_a == hash_b == hash_c == hash_d + status = "✓" if all_match else "✗" + + print(f"{status} Content equivalence test:") + print(f" Table A (original): {hash_a[:12]}...") + print(f" Table B (reord cols): {hash_b[:12]}...") + print(f" Table C (reord rows): {hash_c[:12]}...") + print(f" Table D (both reord): {hash_d[:12]}...") + print(f" All hashes match: {all_match}") + + print("\n5. PERFORMANCE COMPARISON") + print("-" * 50) + + # Create larger table for performance testing + large_size = 10000 + large_table = pa.table( + { + "int_col": pa.array(list(range(large_size)), type=pa.int32()), + "float_col": pa.array( + [i * 1.5 for i in range(large_size)], type=pa.float64() + ), + "string_col": pa.array([f"item_{i}" for i in range(large_size)]), + "bool_col": pa.array([i % 2 == 0 for i in range(large_size)]), + } + ) + + # Time each method with different order options + for config_name, order_opts in order_configs: + times = [] + for _ in range(3): # Run 3 times for average + start = time.time() + hash_result = serialize_table_logical_hash( + large_table, order_options=order_opts + ) + end = time.time() + times.append(end - start) + + avg_time = sum(times) / len(times) + throughput = (large_size * 4) / avg_time # 4 columns + + print( + f"{config_name:25} | {avg_time * 1000:6.1f}ms | {throughput:8.0f} values/sec" + ) + + print(f"\n{'=' * 70}") + print("ORDER-INDEPENDENT SERIALIZATION TEST SUITE COMPLETE") + print(f"{'=' * 70}") + + +# Main execution +if __name__ == "__main__": + # Run the comprehensive test suite + test_results = run_comprehensive_tests() diff --git a/src/orcapod/hashing/arrow_utils.py b/src/orcapod/hashing/arrow_utils.py new file mode 100644 index 0000000..7dc565e --- /dev/null +++ b/src/orcapod/hashing/arrow_utils.py @@ -0,0 +1,408 @@ +import json +import hashlib +from typing import Any, TYPE_CHECKING +from decimal import Decimal +import base64 +from orcapod.utils.lazy_module import LazyModule + +if TYPE_CHECKING: + import pyarrow as pa +else: + pa = LazyModule("pyarrow") + + +def serialize_pyarrow_table_schema(table: "pa.Table") -> str: + """ + Serialize PyArrow table schema to JSON with Python type names and filtered metadata. 
+ + Args: + table: PyArrow table + + Returns: + JSON string representation of schema + """ + schema_info = [] + + for field in table.schema: + field_info = { + "name": field.name, + "type": _arrow_type_to_python_type(field.type), + "metadata": _extract_semantic_metadata(field.metadata), + } + schema_info.append(field_info) + + return json.dumps(schema_info, separators=(",", ":"), sort_keys=True) + + +def serialize_pyarrow_table(table: "pa.Table") -> str: + """ + Serialize a PyArrow table to a stable JSON string with both schema and data. + + Args: + table: PyArrow table to serialize + + Returns: + JSON string representation with schema and data sections + """ + # Convert table to dictionary of lists using to_pylist() + data_dict = {} + + for column_name in table.column_names: + column = table.column(column_name) + # Convert Arrow column to Python list, which visits all elements + column_values = column.to_pylist() + + # Handle special types that need encoding for JSON + data_dict[column_name] = [ + _serialize_value_for_json(val) for val in column_values + ] + + # Serialize schema + schema_info = [] + for field in table.schema: + field_info = { + "name": field.name, + "type": _arrow_type_to_python_type(field.type), + "metadata": _extract_semantic_metadata(field.metadata), + } + schema_info.append(field_info) + + # Combine schema and data + serialized_table = {"schema": schema_info, "data": data_dict} + + # Serialize to JSON with sorted keys and no whitespace + return json.dumps( + serialized_table, + separators=(",", ":"), + sort_keys=True, + default=_json_serializer, + ) + + +def get_pyarrow_table_hash(table: "pa.Table") -> str: + """ + Get a stable SHA-256 hash of the table content. + + Args: + table: PyArrow table + + Returns: + SHA-256 hash of the serialized table + """ + serialized = serialize_pyarrow_table(table) + return hashlib.sha256(serialized.encode("utf-8")).hexdigest() + + +def deserialize_to_pyarrow_table(serialized_str: str) -> "pa.Table": + """ + Deserialize JSON string back to a PyArrow table. + + Args: + serialized_str: JSON string from serialize_pyarrow_table + + Returns: + Reconstructed PyArrow table + """ + parsed_data = json.loads(serialized_str) + + # Handle both old format (dict of lists) and new format (schema + data) + if "data" in parsed_data and "schema" in parsed_data: + # New format with schema and data + data_dict = parsed_data["data"] + schema_info = parsed_data["schema"] + else: + # Old format - just data dict + data_dict = parsed_data + schema_info = None + + if not data_dict: + return pa.table([]) + + # Deserialize each column + arrays = [] + names = [] + + for column_name in sorted(data_dict.keys()): # Sort for consistency + column_values = [_deserialize_value(val) for val in data_dict[column_name]] + arrays.append(pa.array(column_values)) + names.append(column_name) + + return pa.table(arrays, names=names) + + +def _arrow_type_to_python_type(arrow_type: pa.DataType) -> str: + """ + Convert PyArrow data type to standard Python type name. 
+ + Args: + arrow_type: PyArrow data type + + Returns: + Python type name as string + """ + if pa.types.is_boolean(arrow_type): + return "bool" + elif pa.types.is_integer(arrow_type): + return "int" + elif pa.types.is_floating(arrow_type): + return "float" + elif pa.types.is_string(arrow_type) or pa.types.is_large_string(arrow_type): + return "str" + elif pa.types.is_binary(arrow_type) or pa.types.is_large_binary(arrow_type): + return "bytes" + elif pa.types.is_date(arrow_type): + return "date" + elif pa.types.is_timestamp(arrow_type): + return "datetime" + elif pa.types.is_time(arrow_type): + return "time" + elif pa.types.is_decimal(arrow_type): + return "decimal" + elif pa.types.is_list(arrow_type) or pa.types.is_large_list(arrow_type): + element_type = _arrow_type_to_python_type(arrow_type.value_type) + return f"list[{element_type}]" + elif pa.types.is_struct(arrow_type): + return "dict" + elif pa.types.is_dictionary(arrow_type): + value_type = _arrow_type_to_python_type(arrow_type.value_type) + return value_type # Dictionary encoding is transparent + elif pa.types.is_null(arrow_type): + return "NoneType" + else: + # Fallback for other types + return str(arrow_type).lower() + + +def _extract_semantic_metadata(field_metadata) -> dict[str, str]: + """ + Extract only 'semantic_type' metadata from field metadata. + + Args: + field_metadata: PyArrow field metadata (can be None) + + Returns: + Dictionary containing only semantic_type if present, empty dict otherwise + """ + if field_metadata is None: + return {} + + metadata_dict = dict(field_metadata) + + # Only keep semantic_type if it exists + if "semantic_type" in metadata_dict: + return { + "semantic_type": metadata_dict["semantic_type"].decode("utf-8") + if isinstance(metadata_dict["semantic_type"], bytes) + else metadata_dict["semantic_type"] + } + else: + return {} + + +def _serialize_value_for_json(value: Any) -> Any: + """ + Prepare a Python value for JSON serialization. + + Args: + value: Python value from to_pylist() + + Returns: + JSON-serializable value + """ + if value is None: + return None + elif isinstance(value, bytes): + return { + "__type__": "bytes", + "__value__": base64.b64encode(value).decode("ascii"), + } + elif isinstance(value, Decimal): + return {"__type__": "decimal", "__value__": str(value)} + elif hasattr(value, "date") and hasattr(value, "time"): # datetime objects + return {"__type__": "datetime", "__value__": value.isoformat()} + elif hasattr(value, "isoformat") and not hasattr( + value, "time" + ): # date objects (no time component) + return {"__type__": "date", "__value__": value.isoformat()} + elif isinstance(value, (list, tuple)): + return [_serialize_value_for_json(item) for item in value] + elif isinstance(value, dict): + return {k: _serialize_value_for_json(v) for k, v in sorted(value.items())} + else: + return value + + +def _deserialize_value(value: Any) -> Any: + """ + Deserialize a value from the JSON representation. 
+ + Args: + value: Value from JSON + + Returns: + Python value suitable for PyArrow + """ + if value is None: + return None + elif isinstance(value, dict) and "__type__" in value: + type_name = value["__type__"] + val = value["__value__"] + + if type_name == "bytes": + return base64.b64decode(val.encode("ascii")) + elif type_name == "decimal": + return Decimal(val) + elif type_name == "datetime": + from datetime import datetime + + return datetime.fromisoformat(val) + elif type_name == "date": + from datetime import date + + return date.fromisoformat(val) + else: + return val + elif isinstance(value, list): + return [_deserialize_value(item) for item in value] + elif isinstance(value, dict): + return {k: _deserialize_value(v) for k, v in value.items()} + else: + return value + + +def _json_serializer(obj): + """Custom JSON serializer for edge cases.""" + if hasattr(obj, "date") and hasattr(obj, "time"): # datetime objects + return {"__type__": "datetime", "__value__": obj.isoformat()} + elif hasattr(obj, "isoformat") and not hasattr(obj, "time"): # date objects + return {"__type__": "date", "__value__": obj.isoformat()} + elif isinstance(obj, bytes): + return {"__type__": "bytes", "__value__": base64.b64encode(obj).decode("ascii")} + elif isinstance(obj, Decimal): + return {"__type__": "decimal", "__value__": str(obj)} + else: + return str(obj) # Fallback to string representation + + +# Example usage and testing +if __name__ == "__main__": + import datetime + + # Create a sample PyArrow table with various types + data = { + "integers": [1, 2, 3, 4, 5], + "floats": [1.1, 2.2, 3.3, 4.4, 5.5], + "strings": ["a", "b", "c", "d", "e"], + "booleans": [True, False, True, False, True], + "nulls": [1, None, 3, None, 5], + "dates": [ + datetime.date(2023, 1, 1), + datetime.date(2023, 1, 2), + None, + datetime.date(2023, 1, 4), + datetime.date(2023, 1, 5), + ], + } + + table = pa.table(data) + print("Original table:") + print(table) + print() + + # Serialize the table + serialized = serialize_pyarrow_table(table) + print("Serialized JSON (first 200 chars):") + print(serialized[:200] + "..." 
if len(serialized) > 200 else serialized) + print() + + # Get hash + table_hash = get_pyarrow_table_hash(table) + print(f"Table hash: {table_hash}") + print() + + # Test stability + serialized2 = serialize_pyarrow_table(table) + hash2 = get_pyarrow_table_hash(table) + + print(f"Serialization is stable: {serialized == serialized2}") + print(f"Hash is stable: {table_hash == hash2}") + print() + + # Test with different column order + print("--- Testing column order stability ---") + data_reordered = { + "strings": ["a", "b", "c", "d", "e"], + "integers": [1, 2, 3, 4, 5], + "nulls": [1, None, 3, None, 5], + "floats": [1.1, 2.2, 3.3, 4.4, 5.5], + "booleans": [True, False, True, False, True], + "dates": [ + datetime.date(2023, 1, 1), + datetime.date(2023, 1, 2), + None, + datetime.date(2023, 1, 4), + datetime.date(2023, 1, 5), + ], + } + + table_reordered = pa.table(data_reordered) + serialized_reordered = serialize_pyarrow_table(table_reordered) + hash_reordered = get_pyarrow_table_hash(table_reordered) + + print( + f"Same content, different column order produces same serialization: {serialized == serialized_reordered}" + ) + print( + f"Same content, different column order produces same hash: {table_hash == hash_reordered}" + ) + print() + + # Test schema serialization + print("\n--- Testing schema serialization ---") + + # Create table with metadata + schema = pa.schema( + [ + pa.field( + "integers", + pa.int64(), + metadata={"semantic_type": "id", "other_meta": "ignored"}, + ), + pa.field("floats", pa.float64(), metadata={"semantic_type": "measurement"}), + pa.field("strings", pa.string()), # No metadata + pa.field( + "booleans", pa.bool_(), metadata={"other_meta": "ignored"} + ), # No semantic_type + pa.field("dates", pa.date32(), metadata={"semantic_type": "event_date"}), + ] + ) + + table_with_schema = pa.table(data, schema=schema) + schema_json = serialize_pyarrow_table_schema(table_with_schema) + print(f"Schema JSON: {schema_json}") + + # Parse and display nicely + import json as json_module + + schema_parsed = json_module.loads(schema_json) + print("\nParsed schema:") + for field in schema_parsed: + print(f" {field['name']}: {field['type']} (metadata: {field['metadata']})") + + # Test deserialization + reconstructed = deserialize_to_pyarrow_table(serialized) + print("Reconstructed table:") + print(reconstructed) + print() + + # Verify round-trip + reconstructed_hash = get_pyarrow_table_hash(reconstructed) + print(f"Round-trip hash matches: {table_hash == reconstructed_hash}") + + # Show actual JSON structure for small example + print("\n--- Small example JSON structure ---") + small_table = pa.table( + {"numbers": [1, 2, None], "text": ["hello", "world", "test"]} + ) + small_json = serialize_pyarrow_table(small_table) + print(f"Small table JSON: {small_json}") diff --git a/src/orcapod/hashing/content_identifiable.py b/src/orcapod/hashing/content_identifiable.py new file mode 100644 index 0000000..1e48243 --- /dev/null +++ b/src/orcapod/hashing/content_identifiable.py @@ -0,0 +1,112 @@ +from orcapod.hashing.types import ObjectHasher +from orcapod.hashing.defaults import get_default_object_hasher +from typing import Any + + +class ContentIdentifiableBase: + """ + Base class for content-identifiable objects. + This class provides a way to define objects that can be uniquely identified + based on their content rather than their identity in memory. Specifically, the identity of the + object is determined by the structure returned by the `identity_structure` method. 
+ The hash of the object is computed based on the `identity_structure` using the provided `ObjectHasher`, + which defaults to the one returned by `get_default_object_hasher`. + Two content-identifiable objects are considered equal if their `identity_structure` returns the same value. + """ + + def __init__( + self, + identity_structure_hasher: ObjectHasher | None = None, + label: str | None = None, + ) -> None: + """ + Initialize the ContentHashable with an optional ObjectHasher. + + Args: + identity_structure_hasher (ObjectHasher | None): An instance of ObjectHasher to use for hashing. + """ + self.identity_structure_hasher = ( + identity_structure_hasher or get_default_object_hasher() + ) + self._label = label + + @property + def has_assigned_label(self) -> bool: + """ + Check if the label is explicitly set for this object. + + Returns: + bool: True if the label is explicitly set, False otherwise. + """ + return self._label is not None + + @property + def label(self) -> str: + """ + Get the label of this object. + + Returns: + str | None: The label of the object, or None if not set. + """ + return self._label or self.computed_label() or self.__class__.__name__ + + @label.setter + def label(self, label: str | None) -> None: + """ + Set the label of this object. + + Args: + label (str | None): The label to set for this object. + """ + self._label = label + + def computed_label(self) -> str | None: + """ + Compute a label for this object based on its content. If label is not explicitly set for this object + and computed_label returns a valid value, it will be used as label of this object. + """ + return None + + def identity_structure(self) -> Any: + """ + Return a structure that represents the identity of this object. + + Override this method in your subclass to provide a stable representation + of your object's content. The structure should contain all fields that + determine the object's identity. + + Returns: + Any: A structure representing this object's content, or None to use default hash + """ + return None + + def __hash__(self) -> int: + """ + Hash implementation that uses the identity structure if provided, + otherwise falls back to the superclass's hash method. + + Returns: + int: A hash value based on either content or identity + """ + # Get the identity structure + structure = self.identity_structure() + if structure is None: + # If no identity structure is provided, use the default hash + return super().__hash__() + + return self.identity_structure_hasher.hash_to_int(structure) + + def __eq__(self, other: object) -> bool: + """ + Equality check that compares the identity structures of two objects. + + Args: + other (object): The object to compare against. + + Returns: + bool: True if both objects have the same identity structure, False otherwise. + """ + if not isinstance(other, ContentIdentifiableBase): + return NotImplemented + + return self.identity_structure() == other.identity_structure() diff --git a/src/orcapod/hashing/defaults.py b/src/orcapod/hashing/defaults.py index 85e1405..c9e404b 100644 --- a/src/orcapod/hashing/defaults.py +++ b/src/orcapod/hashing/defaults.py @@ -1,43 +1,61 @@ # A collection of utility function that provides a "default" implementation of hashers. # This is often used as the fallback hasher in the library code. 
-from orcapod.hashing.types import CompositeFileHasher, ArrowHasher -from orcapod.hashing.file_hashers import PathLikeHasherFactory +from orcapod.protocols import hashing_protocols as hp + from orcapod.hashing.string_cachers import InMemoryCacher -from orcapod.hashing.object_hashers import ObjectHasher from orcapod.hashing.object_hashers import LegacyObjectHasher from orcapod.hashing.function_info_extractors import FunctionInfoExtractorFactory -from orcapod.hashing.semantic_arrow_hasher import SemanticArrowHasher, PathHasher +from orcapod.hashing.versioned_hashers import ( + get_versioned_semantic_arrow_hasher, + get_versioned_object_hasher, +) -def get_default_composite_file_hasher(with_cache=True) -> CompositeFileHasher: - if with_cache: +def get_default_arrow_hasher( + cache_file_hash: bool | hp.StringCacher = True, +) -> hp.ArrowHasher: + """ + Get the default Arrow hasher with semantic type support. + If `cache_file_hash` is True, it uses an in-memory cacher for caching hash values. If a `StringCacher` is provided, it uses that for caching file hashes. + """ + arrow_hasher = get_versioned_semantic_arrow_hasher() + if cache_file_hash: # use unlimited caching - string_cacher = InMemoryCacher(max_size=None) - return PathLikeHasherFactory.create_cached_composite(string_cacher) - return PathLikeHasherFactory.create_basic_composite() + if cache_file_hash is True: + string_cacher = InMemoryCacher(max_size=None) + else: + string_cacher = cache_file_hash + + arrow_hasher.set_cacher("path", string_cacher) + + return arrow_hasher -def get_default_composite_file_hasher_with_cacher(cacher=None) -> CompositeFileHasher: - if cacher is None: - cacher = InMemoryCacher(max_size=None) - return PathLikeHasherFactory.create_cached_composite(cacher) +def get_default_object_hasher() -> hp.ObjectHasher: + object_hasher = get_versioned_object_hasher() + return object_hasher -def get_default_object_hasher() -> ObjectHasher: +def get_legacy_object_hasher() -> hp.ObjectHasher: function_info_extractor = ( FunctionInfoExtractorFactory.create_function_info_extractor( strategy="signature" ) ) - return LegacyObjectHasher( - char_count=32, function_info_extractor=function_info_extractor - ) + return LegacyObjectHasher(function_info_extractor=function_info_extractor) -def get_default_arrow_hasher( - chunk_size: int = 8192, handle_missing: str = "error" -) -> ArrowHasher: - hasher = SemanticArrowHasher(chunk_size=chunk_size, handle_missing=handle_missing) - # register semantic hasher for Path - hasher.register_semantic_hasher("Path", PathHasher()) - return hasher +# def get_default_composite_file_hasher(with_cache=True) -> LegacyCompositeFileHasher: +# if with_cache: +# # use unlimited caching +# string_cacher = InMemoryCacher(max_size=None) +# return LegacyPathLikeHasherFactory.create_cached_legacy_composite(string_cacher) +# return LegacyPathLikeHasherFactory.create_basic_legacy_composite() + + +# def get_default_composite_file_hasher_with_cacher( +# cacher=None, +# ) -> LegacyCompositeFileHasher: +# if cacher is None: +# cacher = InMemoryCacher(max_size=None) +# return LegacyPathLikeHasherFactory.create_cached_legacy_composite(cacher) diff --git a/src/orcapod/hashing/file_hashers.py b/src/orcapod/hashing/file_hashers.py index 77833ee..d5fc761 100644 --- a/src/orcapod/hashing/file_hashers.py +++ b/src/orcapod/hashing/file_hashers.py @@ -1,15 +1,12 @@ -from orcapod.hashing.core import hash_file, hash_pathset, hash_packet -from orcapod.hashing.types import ( - FileHasher, - PathSetHasher, +from orcapod.hashing import 
legacy_core +from orcapod.hashing.hash_utils import hash_file +from orcapod.protocols.hashing_protocols import ( + FileContentHasher, StringCacher, - CompositeFileHasher, ) -from orcapod.types import Packet, PathLike, PathSet +from orcapod.types import PathLike, PathSet, PacketLike -# Completely unnecessary to inherit from FileHasher, but this -# allows for type checking based on isinstance class BasicFileHasher: """Basic implementation for file hashing.""" @@ -21,7 +18,7 @@ def __init__( self.algorithm = algorithm self.buffer_size = buffer_size - def hash_file(self, file_path: PathLike) -> str: + def hash_file(self, file_path: PathLike) -> bytes: return hash_file( file_path, algorithm=self.algorithm, buffer_size=self.buffer_size ) @@ -32,133 +29,192 @@ class CachedFileHasher: def __init__( self, - file_hasher: FileHasher, + file_hasher: FileContentHasher, string_cacher: StringCacher, ): self.file_hasher = file_hasher self.string_cacher = string_cacher - def hash_file(self, file_path: PathLike) -> str: + def hash_file(self, file_path: PathLike) -> bytes: cache_key = f"file:{file_path}" cached_value = self.string_cacher.get_cached(cache_key) if cached_value is not None: - return cached_value + return bytes.fromhex(cached_value) value = self.file_hasher.hash_file(file_path) - self.string_cacher.set_cached(cache_key, value) + self.string_cacher.set_cached(cache_key, value.hex()) return value -class DefaultPathsetHasher: - """Default pathset hasher that composes file hashing.""" - - def __init__( - self, - file_hasher: FileHasher, - char_count: int | None = 32, - ): - self.file_hasher = file_hasher - self.char_count = char_count - - def hash_pathset(self, pathset: PathSet) -> str: - """Hash a pathset using the injected file hasher.""" - return hash_pathset( - pathset, - char_count=self.char_count, - file_hasher=self.file_hasher.hash_file, # Inject the method - ) - - -class DefaultPacketHasher: - """Default packet hasher that composes pathset hashing.""" - - def __init__( - self, - pathset_hasher: PathSetHasher, - char_count: int | None = 32, - prefix: str = "", - ): - self.pathset_hasher = pathset_hasher - self.char_count = char_count - self.prefix = prefix - - def hash_packet(self, packet: Packet) -> str: - """Hash a packet using the injected pathset hasher.""" - hash_str = hash_packet( - packet, - char_count=self.char_count, - prefix_algorithm=False, # Will apply prefix on our own - pathset_hasher=self.pathset_hasher.hash_pathset, # Inject the method - ) - return f"{self.prefix}-{hash_str}" if self.prefix else hash_str - - -# Convenience composite implementation -class DefaultCompositeFileHasher: - """Composite hasher that implements all interfaces.""" - - def __init__( - self, - file_hasher: FileHasher, - char_count: int | None = 32, - packet_prefix: str = "", - ): - self.file_hasher = file_hasher - self.pathset_hasher = DefaultPathsetHasher(self.file_hasher, char_count) - self.packet_hasher = DefaultPacketHasher( - self.pathset_hasher, char_count, packet_prefix - ) - - def hash_file(self, file_path: PathLike) -> str: - return self.file_hasher.hash_file(file_path) - - def hash_pathset(self, pathset: PathSet) -> str: - return self.pathset_hasher.hash_pathset(pathset) - - def hash_packet(self, packet: Packet) -> str: - return self.packet_hasher.hash_packet(packet) - - -# Factory for easy construction -class PathLikeHasherFactory: - """Factory for creating various hasher combinations.""" - - @staticmethod - def create_basic_composite( - algorithm: str = "sha256", - buffer_size: int = 65536, - 
char_count: int | None = 32, - ) -> CompositeFileHasher: - """Create a basic composite hasher.""" - file_hasher = BasicFileHasher(algorithm, buffer_size) - # use algorithm as the prefix for the packet hasher - return DefaultCompositeFileHasher( - file_hasher, char_count, packet_prefix=algorithm - ) - - @staticmethod - def create_cached_composite( - string_cacher: StringCacher, - algorithm: str = "sha256", - buffer_size: int = 65536, - char_count: int | None = 32, - ) -> CompositeFileHasher: - """Create a composite hasher with file caching.""" - basic_file_hasher = BasicFileHasher(algorithm, buffer_size) - cached_file_hasher = CachedFileHasher(basic_file_hasher, string_cacher) - return DefaultCompositeFileHasher( - cached_file_hasher, char_count, packet_prefix=algorithm - ) - - @staticmethod - def create_file_hasher( - string_cacher: StringCacher | None = None, - algorithm: str = "sha256", - buffer_size: int = 65536, - ) -> FileHasher: - """Create just a file hasher, optionally with caching.""" - basic_hasher = BasicFileHasher(algorithm, buffer_size) - if string_cacher is None: - return basic_hasher - else: - return CachedFileHasher(basic_hasher, string_cacher) +# ----------------Legacy implementations for backward compatibility----------------- + + +# class LegacyDefaultFileHasher: +# def __init__( +# self, +# algorithm: str = "sha256", +# buffer_size: int = 65536, +# ): +# self.algorithm = algorithm +# self.buffer_size = buffer_size + +# def hash_file(self, file_path: PathLike) -> str: +# return legacy_core.hash_file( +# file_path, algorithm=self.algorithm, buffer_size=self.buffer_size +# ) + + +# class LegacyCachedFileHasher: +# """File hasher with caching.""" + +# def __init__( +# self, +# file_hasher: LegacyFileHasher, +# string_cacher: StringCacher, +# ): +# self.file_hasher = file_hasher +# self.string_cacher = string_cacher + +# def hash_file(self, file_path: PathLike) -> str: +# cache_key = f"file:{file_path}" +# cached_value = self.string_cacher.get_cached(cache_key) +# if cached_value is not None: +# return cached_value + +# value = self.file_hasher.hash_file(file_path) +# self.string_cacher.set_cached(cache_key, value) +# return value + + +# class LegacyDefaultPathsetHasher: +# """Default pathset hasher that composes file hashing.""" + +# def __init__( +# self, +# file_hasher: LegacyFileHasher, +# char_count: int | None = 32, +# ): +# self.file_hasher = file_hasher +# self.char_count = char_count + +# def _hash_file_to_hex(self, file_path: PathLike) -> str: +# return self.file_hasher.hash_file(file_path) + +# def hash_pathset(self, pathset: PathSet) -> str: +# """Hash a pathset using the injected file hasher.""" +# return legacy_core.hash_pathset( +# pathset, +# char_count=self.char_count, +# file_hasher=self.file_hasher.hash_file, # Inject the method +# ) + + +# class LegacyDefaultPacketHasher: +# """Default packet hasher that composes pathset hashing.""" + +# def __init__( +# self, +# pathset_hasher: LegacyPathSetHasher, +# char_count: int | None = 32, +# prefix: str = "", +# ): +# self.pathset_hasher = pathset_hasher +# self.char_count = char_count +# self.prefix = prefix + +# def _hash_pathset_to_hex(self, pathset: PathSet): +# return self.pathset_hasher.hash_pathset(pathset) + +# def hash_packet(self, packet: PacketLike) -> str: +# """Hash a packet using the injected pathset hasher.""" +# hash_str = legacy_core.hash_packet( +# packet, +# char_count=self.char_count, +# prefix_algorithm=False, # Will apply prefix on our own +# pathset_hasher=self._hash_pathset_to_hex, # 
Inject the method +# ) +# return f"{self.prefix}-{hash_str}" if self.prefix else hash_str + + +# # Convenience composite implementation +# class LegacyDefaultCompositeFileHasher: +# """Composite hasher that implements all interfaces.""" + +# def __init__( +# self, +# file_hasher: LegacyFileHasher, +# char_count: int | None = 32, +# packet_prefix: str = "", +# ): +# self.file_hasher = file_hasher +# self.pathset_hasher = LegacyDefaultPathsetHasher(self.file_hasher, char_count) +# self.packet_hasher = LegacyDefaultPacketHasher( +# self.pathset_hasher, char_count, packet_prefix +# ) + +# def hash_file(self, file_path: PathLike) -> str: +# return self.file_hasher.hash_file(file_path) + +# def hash_pathset(self, pathset: PathSet) -> str: +# return self.pathset_hasher.hash_pathset(pathset) + +# def hash_packet(self, packet: PacketLike) -> str: +# return self.packet_hasher.hash_packet(packet) + + +# # Factory for easy construction +# class LegacyPathLikeHasherFactory: +# """Factory for creating various hasher combinations.""" + +# @staticmethod +# def create_basic_legacy_composite( +# algorithm: str = "sha256", +# buffer_size: int = 65536, +# char_count: int | None = 32, +# ) -> LegacyCompositeFileHasher: +# """Create a basic composite hasher.""" +# file_hasher = LegacyDefaultFileHasher(algorithm, buffer_size) +# # use algorithm as the prefix for the packet hasher +# return LegacyDefaultCompositeFileHasher( +# file_hasher, char_count, packet_prefix=algorithm +# ) + +# @staticmethod +# def create_cached_legacy_composite( +# string_cacher: StringCacher, +# algorithm: str = "sha256", +# buffer_size: int = 65536, +# char_count: int | None = 32, +# ) -> LegacyCompositeFileHasher: +# """Create a composite hasher with file caching.""" +# basic_file_hasher = LegacyDefaultFileHasher(algorithm, buffer_size) +# cached_file_hasher = LegacyCachedFileHasher(basic_file_hasher, string_cacher) +# return LegacyDefaultCompositeFileHasher( +# cached_file_hasher, char_count, packet_prefix=algorithm +# ) + +# @staticmethod +# def create_legacy_file_hasher( +# string_cacher: StringCacher | None = None, +# algorithm: str = "sha256", +# buffer_size: int = 65536, +# ) -> LegacyFileHasher: +# """Create just a file hasher, optionally with caching.""" +# default_hasher = LegacyDefaultFileHasher(algorithm, buffer_size) +# if string_cacher is None: +# return default_hasher +# else: +# return LegacyCachedFileHasher(default_hasher, string_cacher) + +# @staticmethod +# def create_file_hasher( +# string_cacher: StringCacher | None = None, +# algorithm: str = "sha256", +# buffer_size: int = 65536, +# ) -> FileContentHasher: +# """Create just a file hasher, optionally with caching.""" +# basic_hasher = BasicFileHasher(algorithm, buffer_size) +# if string_cacher is None: +# return basic_hasher +# else: +# return CachedFileHasher(basic_hasher, string_cacher) diff --git a/src/orcapod/hashing/function_info_extractors.py b/src/orcapod/hashing/function_info_extractors.py index 2c32f05..27cae33 100644 --- a/src/orcapod/hashing/function_info_extractors.py +++ b/src/orcapod/hashing/function_info_extractors.py @@ -1,4 +1,4 @@ -from .types import FunctionInfoExtractor +from orcapod.protocols.hashing_protocols import FunctionInfoExtractor from collections.abc import Callable from typing import Any, Literal from orcapod.types import TypeSpec @@ -14,8 +14,8 @@ def extract_function_info( self, func: Callable[..., Any], function_name: str | None = None, - input_types: TypeSpec | None = None, - output_types: TypeSpec | None = None, + input_typespec: 
TypeSpec | None = None, + output_typespec: TypeSpec | None = None, ) -> dict[str, Any]: if not callable(func): raise TypeError("Provided object is not callable") @@ -38,8 +38,8 @@ def extract_function_info( self, func: Callable[..., Any], function_name: str | None = None, - input_types: TypeSpec | None = None, - output_types: TypeSpec | None = None, + input_typespec: TypeSpec | None = None, + output_typespec: TypeSpec | None = None, ) -> dict[str, Any]: if not callable(func): raise TypeError("Provided object is not callable") diff --git a/src/orcapod/hashing/hash_utils.py b/src/orcapod/hashing/hash_utils.py new file mode 100644 index 0000000..790b49f --- /dev/null +++ b/src/orcapod/hashing/hash_utils.py @@ -0,0 +1,364 @@ +from typing import Any +from .function_info_extractors import FunctionInfoExtractor +from orcapod.protocols.hashing_protocols import ContentIdentifiable +import logging +import json +from uuid import UUID +from pathlib import Path +from collections.abc import Mapping, Collection, Callable +import hashlib +import xxhash +import zlib +import inspect + +logger = logging.getLogger(__name__) + + +def serialize_through_json(processed_obj) -> bytes: + """ + Create a deterministic string representation of a processed object structure. + + Args: + processed_obj: The processed object to serialize + + Returns: + A bytes object ready for hashing + """ + # TODO: add type check of processed obj + return json.dumps(processed_obj, sort_keys=True, separators=(",", ":")).encode( + "utf-8" + ) + + +def process_structure( + obj: Any, + visited: set[int] | None = None, + function_info_extractor: FunctionInfoExtractor | None = None, + force_hash: bool = False, +) -> Any: + """ + Recursively process a structure to prepare it for hashing. + + Args: + obj: The object or structure to process + visited: Set of object ids already visited (to handle circular references) + function_info_extractor: FunctionInfoExtractor to be used for extracting necessary function representation + + Returns: + A processed version of the structure suitable for stable hashing + """ + # Initialize the visited set if this is the top-level call + if visited is None: + visited = set() + else: + visited = visited.copy() # Copy to avoid modifying the original set + + # Check for circular references - use object's memory address + # NOTE: While id() is not stable across sessions, we only use it within a session + # to detect circular references, not as part of the final hash + obj_id = id(obj) + if obj_id in visited: + logger.debug( + f"Detected circular reference for object of type {type(obj).__name__}" + ) + return "CircularRef" # Don't include the actual id in hash output + + # For objects that could contain circular references, add to visited + if isinstance(obj, (dict, list, tuple, set)) or not isinstance( + obj, (str, int, float, bool, type(None)) + ): + visited.add(obj_id) + + # Handle None + if obj is None: + return None + + # TODO: currently using runtime_checkable on ContentIdentifiable protocol + # Re-evaluate this strategy to see if a faster / more robust check could be used + if isinstance(obj, ContentIdentifiable): + logger.debug( + f"Processing ContentHashableBase instance of type {type(obj).__name__}" + ) + # replace the object with expanded identity structure and re-process + return process_structure( + obj.identity_structure(), visited, function_info_extractor + ) + + # Handle basic types + if isinstance(obj, (str, int, float, bool)): + return obj + + # Handle bytes and bytearray + if isinstance(obj, (bytes, 
bytearray)): + logger.debug( + f"Converting bytes/bytearray of length {len(obj)} to hex representation" + ) + return obj.hex() + + # Handle Path objects + if isinstance(obj, Path): + logger.debug(f"Converting Path object to string: {obj}") + return str(obj) + + # Handle UUID objects + if isinstance(obj, UUID): + logger.debug(f"Converting UUID to string: {obj}") + return str(obj) + + # Handle named tuples (which are subclasses of tuple) + if hasattr(obj, "_fields") and isinstance(obj, tuple): + logger.debug(f"Processing named tuple of type {type(obj).__name__}") + # For namedtuples, convert to dict and then process + d = {field: getattr(obj, field) for field in obj._fields} # type: ignore + return process_structure(d, visited, function_info_extractor) + + # Handle mappings (dict-like objects) + if isinstance(obj, Mapping): + # Process both keys and values + processed_items = [ + ( + process_structure(k, visited, function_info_extractor), + process_structure(v, visited, function_info_extractor), + ) + for k, v in obj.items() + ] + + # Sort by the processed keys for deterministic order + processed_items.sort(key=lambda x: str(x[0])) + + # Create a new dictionary with string keys based on processed keys + # TODO: consider checking for possibly problematic values in processed_k + # and issue a warning + return { + str(processed_k): processed_v + for processed_k, processed_v in processed_items + } + + # Handle sets and frozensets + if isinstance(obj, (set, frozenset)): + logger.debug( + f"Processing set/frozenset of type {type(obj).__name__} with {len(obj)} items" + ) + # Process each item first, then sort the processed results + processed_items = [ + process_structure(item, visited, function_info_extractor) for item in obj + ] + return sorted(processed_items, key=str) + + # Handle collections (list-like objects) + if isinstance(obj, Collection): + logger.debug( + f"Processing collection of type {type(obj).__name__} with {len(obj)} items" + ) + return [ + process_structure(item, visited, function_info_extractor) for item in obj + ] + + # For functions, use the function_content_hash + if callable(obj) and hasattr(obj, "__code__"): + logger.debug(f"Processing function: {getattr(obj, '__name__')}") + if function_info_extractor is not None: + # Use the extractor to get a stable representation + function_info = function_info_extractor.extract_function_info(obj) + logger.debug(f"Extracted function info: {function_info} for {obj.__name__}") + + # simply return the function info as a stable representation + return function_info + else: + raise ValueError( + f"Function {obj} encountered during processing but FunctionInfoExtractor is missing" + ) + + # handle data types + if isinstance(obj, type): + logger.debug(f"Processing class/type: {obj.__name__}") + return f"type:{obj.__name__}" + + # For other objects, attempt to create deterministic representation only if force_hash=True + class_name = obj.__class__.__name__ + module_name = obj.__class__.__module__ + if force_hash: + try: + import re + + logger.debug( + f"Processing generic object of type {module_name}.{class_name}" + ) + + # Try to get a stable dict representation if possible + if hasattr(obj, "__dict__"): + # Sort attributes to ensure stable order + attrs = sorted( + (k, v) for k, v in obj.__dict__.items() if not k.startswith("_") + ) + # Limit to first 10 attributes to avoid extremely long representations + if len(attrs) > 10: + logger.debug( + f"Object has {len(attrs)} attributes, limiting to first 10" + ) + attrs = attrs[:10] + attr_strs = 
[f"{k}={type(v).__name__}" for k, v in attrs] + obj_repr = f"{{{', '.join(attr_strs)}}}" + else: + # Get basic repr but remove memory addresses + logger.debug( + "Object has no __dict__, using repr() with memory address removal" + ) + obj_repr = repr(obj) + if len(obj_repr) > 1000: + logger.debug( + f"Object repr is {len(obj_repr)} chars, truncating to 1000" + ) + obj_repr = obj_repr[:1000] + "..." + # Remove memory addresses which look like '0x7f9a1c2b3d4e' + obj_repr = re.sub(r" at 0x[0-9a-f]+", " at 0xMEMADDR", obj_repr) + + return f"{module_name}.{class_name}:{obj_repr}" + except Exception as e: + # Last resort - use class name only + logger.warning(f"Failed to process object representation: {e}") + try: + return f"object:{obj.__class__.__module__}.{obj.__class__.__name__}" + except AttributeError: + logger.error("Could not determine object class, using UnknownObject") + return "UnknownObject" + else: + raise ValueError( + f"Processing of {obj} of type {module_name}.{class_name} is not supported" + ) + + +def hash_object( + obj: Any, + function_info_extractor: FunctionInfoExtractor | None = None, +) -> bytes: + # Process the object to handle nested structures and HashableMixin instances + processed = process_structure(obj, function_info_extractor=function_info_extractor) + + # Serialize the processed structure + json_str = json.dumps(processed, sort_keys=True, separators=(",", ":")).encode( + "utf-8" + ) + logger.debug( + f"Successfully serialized {type(obj).__name__} using custom serializer" + ) + + # Create the hash + return hashlib.sha256(json_str).digest() + + +def hash_file(file_path, algorithm="sha256", buffer_size=65536) -> bytes: + """ + Calculate the hash of a file using the specified algorithm. + + Parameters: + file_path (str): Path to the file to hash + algorithm (str): Hash algorithm to use - options include: + 'md5', 'sha1', 'sha256', 'sha512', 'xxh64', 'crc32', 'hash_path' + buffer_size (int): Size of chunks to read from the file at a time + + Returns: + str: Hexadecimal digest of the hash + """ + # Verify the file exists + if not Path(file_path).is_file(): + raise FileNotFoundError(f"The file {file_path} does not exist") + + # Handle special case for 'hash_path' algorithm + if algorithm == "hash_path": + # Hash the name of the file instead of its content + # This is useful for cases where the file content is well known or + # not relevant + hasher = hashlib.sha256() + hasher.update(file_path.encode("utf-8")) + return hasher.digest() + + # Handle non-cryptographic hash functions + if algorithm == "xxh64": + hasher = xxhash.xxh64() + with open(file_path, "rb") as file: + while True: + data = file.read(buffer_size) + if not data: + break + hasher.update(data) + return hasher.digest() + + if algorithm == "crc32": + crc = 0 + with open(file_path, "rb") as file: + while True: + data = file.read(buffer_size) + if not data: + break + crc = zlib.crc32(data, crc) + return (crc & 0xFFFFFFFF).to_bytes(4, byteorder="big") + + # Handle cryptographic hash functions from hashlib + try: + hasher = hashlib.new(algorithm) + except ValueError: + valid_algorithms = ", ".join(sorted(hashlib.algorithms_available)) + raise ValueError( + f"Invalid algorithm: {algorithm}. 
Available algorithms: {valid_algorithms}, xxh64, crc32" + ) + + with open(file_path, "rb") as file: + while True: + data = file.read(buffer_size) + if not data: + break + hasher.update(data) + + return hasher.digest() + + +def get_function_signature( + func: Callable, + name_override: str | None = None, + include_defaults: bool = True, + include_module: bool = True, + output_names: Collection[str] | None = None, +) -> str: + """ + Get a stable string representation of a function's signature. + + Args: + func: The function to process + include_defaults: Whether to include default values + include_module: Whether to include the module name + + Returns: + A string representation of the function signature + """ + sig = inspect.signature(func) + + # Build the signature string + parts = {} + + # Add module if requested + if include_module and hasattr(func, "__module__"): + parts["module"] = func.__module__ + + # Add function name + parts["name"] = name_override or func.__name__ + + # Add parameters + param_strs = [] + for name, param in sig.parameters.items(): + param_str = str(param) + if not include_defaults and "=" in param_str: + param_str = param_str.split("=")[0].strip() + param_strs.append(param_str) + + parts["params"] = f"({', '.join(param_strs)})" + + # Add return annotation if present + if sig.return_annotation is not inspect.Signature.empty: + parts["returns"] = sig.return_annotation + + # TODO: fix return handling + fn_string = f"{parts['module'] + '.' if 'module' in parts else ''}{parts['name']}{parts['params']}" + if "returns" in parts: + fn_string = fn_string + f"-> {str(parts['returns'])}" + return fn_string diff --git a/src/orcapod/hashing/hashing_legacy.py b/src/orcapod/hashing/hashing_legacy.py deleted file mode 100644 index 353a4f9..0000000 --- a/src/orcapod/hashing/hashing_legacy.py +++ /dev/null @@ -1,269 +0,0 @@ -# # a function to hash a dictionary of key value pairs into uuid -# from collections.abc import Collection, Mapping -# import hashlib -# import uuid -# from uuid import UUID -# from typing import Any, Dict, Optional, Union -# import inspect -# import json - -# import hashlib - -# # arbitrary depth of nested dictionaries -# T = Dict[str, Union[str, "T"]] - - -# # TODO: implement proper recursive hashing - - -# def hash_dict(d: T) -> UUID: -# # Convert the dictionary to a string representation -# dict_str = str(sorted(d.items())) - -# # Create a hash of the string representation -# hash_object = hashlib.sha256(dict_str.encode("utf-8")) - -# # Convert the hash to a UUID -# hash_uuid = uuid.UUID(hash_object.hexdigest()) - -# return hash_uuid - - -# def stable_hash(s): -# """Create a stable hash that returns the same integer value across sessions.""" -# # Convert input to bytes if it's not already -# if not isinstance(s, bytes): -# s = str(s).encode("utf-8") - -# hash_hex = hashlib.sha256(s).hexdigest() -# return int(hash_hex[:16], 16) - - -# def hash_function(function, function_hash_mode: str = "content", hasher_kwargs=None) -> str: -# """ -# Hash a function based on its content, signature, or name. 
-# -# Args: -# function: The function to hash -# function_hash_mode: The mode of hashing ('content', 'signature', 'name') -# function_name: Optional name for the function (if not provided, uses function's __name__) - -# Returns: -# A string representing the hash of the function -# """ -# if hasher_kwargs is None: -# hasher_kwargs = {} - -# if function_hash_mode == "content": -# function_hash = function_content_hash(function, **hasher_kwargs) -# elif function_hash_mode == "signature": -# function_hash = stable_hash(get_function_signature(function, **hasher_kwargs)) -# elif function_hash_mode == "name": -# function_hash = stable_hash(function.__name__) - -# return function_hash - - -# def function_content_hash( -# func, exclude_name=False, exclude_module=False, exclude_declaration=False, return_components=False -# ): -# """ -# Compute a hash based on the function's source code, name, module, and closure variables. -# """ -# components = [] - -# # Add function name -# if not exclude_name: -# components.append(f"name:{func.__name__}") - -# # Add module -# if not exclude_module: -# components.append(f"module:{func.__module__}") - -# # Get the function's source code -# try: -# source = inspect.getsource(func) -# # Clean up the source code -# source = source.strip() -# # Remove the function definition line -# if exclude_declaration: -# # find the line that starts with def and remove it -# # TODO: consider dealing with more sophisticated cases like decorators -# source = "\n".join(line for line in source.split("\n") if not line.startswith("def ")) -# components.append(f"source:{source}") -# except (IOError, TypeError): -# # If we can't get the source (e.g., built-in function), use the function's string representation -# components.append(f"repr:{repr(func)}") - -# # Add closure variables if any -# if func.__closure__: -# closure_values = [] -# for cell in func.__closure__: -# # Try to get a stable representation of the cell content -# try: -# # For simple immutable objects -# if isinstance(cell.cell_contents, (int, float, str, bool, type(None))): -# closure_values.append(repr(cell.cell_contents)) -# # For other objects, we'll use their string representation -# else: -# closure_values.append(str(cell.cell_contents)) -# except: -# # If we can't get a stable representation, use the cell's id -# closure_values.append(f"cell_id:{id(cell)}") - -# components.append(f"closure:{','.join(closure_values)}") - -# # Add function attributes that affect behavior -# if hasattr(func, "__defaults__") and func.__defaults__: -# defaults_str = ",".join(repr(d) for d in func.__defaults__) -# components.append(f"defaults:{defaults_str}") - -# if hasattr(func, "__kwdefaults__") and func.__kwdefaults__: -# kwdefaults_str = ",".join(f"{k}={repr(v)}" for k, v in func.__kwdefaults__.items()) -# components.append(f"kwdefaults:{kwdefaults_str}") - -# # Function's code object properties (excluding filename and line numbers) -# code = func.__code__ -# code_props = { -# "co_argcount": code.co_argcount, -# "co_posonlyargcount": getattr(code, "co_posonlyargcount", 0), # Python 3.8+ -# "co_kwonlyargcount": code.co_kwonlyargcount, -# "co_nlocals": code.co_nlocals, -# "co_stacksize": code.co_stacksize, -# "co_flags": code.co_flags, -# "co_code": code.co_code, -# "co_names": code.co_names, -# "co_varnames": code.co_varnames, -# } -# components.append(f"code_properties:{repr(code_props)}") -# if return_components: -# return components - -# # Join all components and compute hash -# combined = "\n".join(components) -# return 
hashlib.sha256(combined.encode("utf-8")).hexdigest() - - -# class HashableMixin: -# """A mixin that provides content-based hashing functionality.""" - -# def identity_structure(self) -> Any: -# """ -# Return a structure that represents the identity of this object. -# By default, returns None to indicate that no custom structure is provided. -# Subclasses should override this method to provide meaningful representations. - -# Returns: -# None to indicate no custom structure (use default hash) -# """ -# return None - -# def content_hash(self, char_count: Optional[int] = 16) -> str: -# """ -# Generate a stable string hash based on the object's content. - -# Returns: -# str: A hexadecimal digest representing the object's content -# """ -# # Get the identity structure -# structure = self.identity_structure() - -# # TODO: consider returning __hash__ based value if structure is None - -# # Generate a hash from the identity structure -# return self._hash_structure(structure, char_count=char_count) - -# def content_hash_int(self, hexdigits=16) -> int: -# """ -# Generate a stable integer hash based on the object's content. - -# Returns: -# int: An integer representing the object's content -# """ -# return int(self.content_hash(char_count=None)[:hexdigits], 16) - -# def __hash__(self) -> int: -# """ -# Hash implementation that uses the identity structure if provided, -# otherwise falls back to the superclass's hash method. - -# Returns: -# int: A hash value based on either content or identity -# """ -# # Get the identity structure -# structure = self.identity_structure() - -# # If no custom structure is provided, use the superclass's hash -# if structure is None: -# return super().__hash__() - -# # Generate a hash and convert to integer -# hash_hex = self._hash_structure(structure, char_count=None) -# return int(hash_hex[:16], 16) - -# def _hash_structure(self, structure: Any, char_count: Optional[int] = 16) -> str: -# """ -# Helper method to compute a hash string from a structure. - -# Args: -# structure: The structure to hash - -# Returns: -# str: A hexadecimal hash digest of the structure -# """ -# processed = self._process_structure(structure) -# json_str = json.dumps(processed, sort_keys=True).encode() -# return hashlib.sha256(json_str).hexdigest()[:char_count] - -# def _process_structure(self, obj: Any) -> Any: -# """ -# Recursively process a structure to prepare it for hashing. 
- -# Args: -# obj: The object or structure to process - -# Returns: -# A processed version of the structure with HashableMixin objects replaced by their hashes -# """ -# # Handle None -# if obj is None: -# return "None" - -# # If the object is a HashableMixin, use its content_hash -# if isinstance(obj, HashableMixin): -# # Don't call content_hash on self to avoid cycles -# if obj is self: -# # TODO: carefully consider this case -# # Use the superclass's hash for self -# return str(super(HashableMixin, self).__hash__()) -# return obj.content_hash() - -# # Handle basic types -# if isinstance(obj, (str, int, float, bool)): -# return str(obj) - -# # Handle named tuples (which are subclasses of tuple) -# if hasattr(obj, "_fields") and isinstance(obj, tuple): -# # For namedtuples, convert to dict and then process -# return self._process_structure({field: value for field, value in zip(obj._fields, obj)}) - -# # Handle mappings (dict-like objects) -# if isinstance(obj, Mapping): -# return {str(k): self._process_structure(v) for k, v in sorted(obj.items(), key=lambda x: str(x[0]))} - -# # Handle sets and frozensets specifically -# if isinstance(obj, (set, frozenset)): -# # Process each item first, then sort the processed results -# processed_items = [self._process_structure(item) for item in obj] -# return sorted(processed_items, key=str) - -# # Handle collections (list-like objects) -# if isinstance(obj, Collection): -# return [self._process_structure(item) for item in obj] - -# # For bytes and bytearray, convert to hex representation -# if isinstance(obj, (bytes, bytearray)): -# return obj.hex() - -# # For other objects, just use their string representation -# return str(obj) diff --git a/src/orcapod/hashing/core.py b/src/orcapod/hashing/legacy_core.py similarity index 94% rename from src/orcapod/hashing/core.py rename to src/orcapod/hashing/legacy_core.py index c711f63..83d172b 100644 --- a/src/orcapod/hashing/core.py +++ b/src/orcapod/hashing/legacy_core.py @@ -1,17 +1,9 @@ -""" -Stable Hashing Library -====================== - -A library for creating stable, content-based hashes that remain consistent across Python sessions, -suitable for arbitrarily nested data structures and custom objects via HashableMixin. -""" - import hashlib import inspect import json import logging import zlib -from .types import FunctionInfoExtractor +from orcapod.protocols.hashing_protocols import FunctionInfoExtractor from functools import partial from os import PathLike from pathlib import Path @@ -29,11 +21,22 @@ ) from uuid import UUID + import xxhash -from orcapod.types import Packet, PathSet +from orcapod.types import PathSet, Packet, PacketLike from orcapod.utils.name import find_noncolliding_name +WARN_NONE_IDENTITY = False +""" +Stable Hashing Library +====================== + +A library for creating stable, content-based hashes that remain consistent across Python sessions, +suitable for arbitrarily nested data structures and custom objects via HashableMixin. +""" + + # Configure logging with __name__ for proper hierarchy logger = logging.getLogger(__name__) @@ -175,11 +178,12 @@ def content_hash(self, char_count: Optional[int] = 16) -> str: # If no custom structure is provided, use the class name # We avoid using id() since it's not stable across sessions if structure is None: - logger.warning( - f"HashableMixin.content_hash called on {self.__class__.__name__} " - "instance that returned identity_structure() of None. 
" - "Using class name as default identity, which may not correctly reflect object uniqueness." - ) + if WARN_NONE_IDENTITY: + logger.warning( + f"HashableMixin.content_hash called on {self.__class__.__name__} " + "instance that returned identity_structure() of None. " + "Using class name as default identity, which may not correctly reflect object uniqueness." + ) # Fall back to class name for consistent behavior return f"HashableMixin-DefaultIdentity-{self.__class__.__name__}" @@ -205,11 +209,12 @@ def content_hash_int(self, hexdigits: int = 16) -> int: # If no custom structure is provided, use the class name # We avoid using id() since it's not stable across sessions if structure is None: - logger.warning( - f"HashableMixin.content_hash_int called on {self.__class__.__name__} " - "instance without identity_structure() implementation. " - "Using class name as default identity, which may not correctly reflect object uniqueness." - ) + if WARN_NONE_IDENTITY: + logger.warning( + f"HashableMixin.content_hash_int called on {self.__class__.__name__} " + "instance that returned identity_structure() of None. " + "Using class name as default identity, which may not correctly reflect object uniqueness." + ) # Use the same default identity as content_hash for consistency default_identity = ( f"HashableMixin-DefaultIdentity-{self.__class__.__name__}" @@ -235,11 +240,12 @@ def content_hash_uuid(self) -> UUID: # If no custom structure is provided, use the class name # We avoid using id() since it's not stable across sessions if structure is None: - logger.warning( - f"HashableMixin.content_hash_uuid called on {self.__class__.__name__} " - "instance without identity_structure() implementation. " - "Using class name as default identity, which may not correctly reflect object uniqueness." - ) + if WARN_NONE_IDENTITY: + logger.warning( + f"HashableMixin.content_hash_uuid called on {self.__class__.__name__} " + "instance without identity_structure() implementation. " + "Using class name as default identity, which may not correctly reflect object uniqueness." + ) # Use the same default identity as content_hash for consistency default_identity = ( f"HashableMixin-DefaultIdentity-{self.__class__.__name__}" @@ -433,6 +439,16 @@ def process_structure( logger.debug(f"Processing HashableMixin instance of type {type(obj).__name__}") return obj.content_hash() + from .content_identifiable import ContentIdentifiableBase + + if isinstance(obj, ContentIdentifiableBase): + logger.debug( + f"Processing ContentHashableBase instance of type {type(obj).__name__}" + ) + return process_structure( + obj.identity_structure(), visited, function_info_extractor + ) + # Handle basic types if isinstance(obj, (str, int, float, bool)): return obj @@ -666,7 +682,7 @@ def hash_packet_with_psh( def hash_packet( - packet: Packet, + packet: PacketLike, algorithm: str = "sha256", buffer_size: int = 65536, char_count: Optional[int] = 32, @@ -829,6 +845,7 @@ def get_function_signature( name_override: str | None = None, include_defaults: bool = True, include_module: bool = True, + output_names: Collection[str] | None = None, ) -> str: """ Get a stable string representation of a function's signature. 
@@ -844,14 +861,14 @@ def get_function_signature( sig = inspect.signature(func) # Build the signature string - parts = [] + parts = {} # Add module if requested if include_module and hasattr(func, "__module__"): - parts.append(f"module:{func.__module__}") + parts["module"] = func.__module__ # Add function name - parts.append(f"name:{name_override or func.__name__}") + parts["name"] = name_override or func.__name__ # Add parameters param_strs = [] @@ -861,13 +878,17 @@ def get_function_signature( param_str = param_str.split("=")[0].strip() param_strs.append(param_str) - parts.append(f"params:({', '.join(param_strs)})") + parts["params"] = f"({', '.join(param_strs)})" # Add return annotation if present if sig.return_annotation is not inspect.Signature.empty: - parts.append(f"returns:{sig.return_annotation}") + parts["returns"] = sig.return_annotation - return " ".join(parts) + # TODO: fix return handling + fn_string = f"{parts['module'] + '.' if 'module' in parts else ''}{parts['name']}{parts['params']}" + if "returns" in parts: + fn_string = fn_string + f"-> {str(parts['returns'])}" + return fn_string def _is_in_string(line, pos): diff --git a/src/orcapod/hashing/object_hashers.py b/src/orcapod/hashing/object_hashers.py index a3f4b39..2a92f69 100644 --- a/src/orcapod/hashing/object_hashers.py +++ b/src/orcapod/hashing/object_hashers.py @@ -1,8 +1,88 @@ -from .types import FunctionInfoExtractor, ObjectHasher -from .core import legacy_hash +from orcapod.protocols.hashing_protocols import FunctionInfoExtractor +from orcapod.hashing import legacy_core +from orcapod.hashing import hash_utils +from typing import Any +import uuid +from abc import ABC, abstractmethod -class LegacyObjectHasher(ObjectHasher): +class ObjectHasherBase(ABC): + @abstractmethod + def hash(self, obj: object) -> bytes: ... + + @abstractmethod + def get_hasher_id(self) -> str: ... + + def hash_to_hex( + self, obj: Any, char_count: int | None = None, prefix_hasher_id: bool = False + ) -> str: + hash_bytes = self.hash(obj) + hex_str = hash_bytes.hex() + + # TODO: clean up this logic, as char_count handling is messy + if char_count is not None: + if char_count > len(hex_str): + raise ValueError( + f"Cannot truncate to {char_count} chars, hash only has {len(hex_str)}" + ) + hex_str = hex_str[:char_count] + if prefix_hasher_id: + hex_str = self.get_hasher_id() + "@" + hex_str + return hex_str + + def hash_to_int(self, obj: Any, hexdigits: int = 16) -> int: + """ + Hash an object to an integer. + + Args: + obj (Any): The object to hash. + hexdigits (int): Number of hexadecimal digits to use for the hash. + + Returns: + int: The integer representation of the hash. + """ + hex_hash = self.hash_to_hex(obj, char_count=hexdigits) + return int(hex_hash, 16) + + def hash_to_uuid( + self, obj: Any, namespace: uuid.UUID = uuid.NAMESPACE_OID + ) -> uuid.UUID: + """Convert hash to proper UUID5.""" + return uuid.uuid5(namespace, self.hash(obj)) + + +class BasicObjectHasher(ObjectHasherBase): + """ + Default object hasher used throughout the codebase. + """ + + def __init__( + self, + hasher_id: str, + function_info_extractor: FunctionInfoExtractor | None = None, + ): + self._hasher_id = hasher_id + self.function_info_extractor = function_info_extractor + + def get_hasher_id(self) -> str: + return self._hasher_id + + def hash(self, obj: object) -> bytes: + """ + Hash an object to a byte representation. + + Args: + obj (object): The object to hash. + + Returns: + bytes: The byte representation of the hash. 
+        """
+        return hash_utils.hash_object(
+            obj, function_info_extractor=self.function_info_extractor
+        )
+
+
+class LegacyObjectHasher(ObjectHasherBase):
     """
     Legacy object hasher that returns the string representation of the object.
@@ -13,7 +93,6 @@ class LegacyObjectHasher(ObjectHasher):
 
     def __init__(
         self,
-        char_count: int | None = 32,
         function_info_extractor: FunctionInfoExtractor | None = None,
     ):
         """
@@ -22,9 +101,14 @@ def __init__(
         Args:
             function_info_extractor (FunctionInfoExtractor | None): Optional extractor for function information. This must be provided if an object containing function information is to be hashed.
         """
-        self.char_count = char_count
         self.function_info_extractor = function_info_extractor
 
+    def get_hasher_id(self) -> str:
+        """
+        Returns a unique identifier/name assigned to the hasher
+        """
+        return "legacy_object_hasher"
+
     def hash(self, obj: object) -> bytes:
         """
         Hash an object to a byte representation.
@@ -35,4 +119,6 @@ def hash(self, obj: object) -> bytes:
         Returns:
             bytes: The byte representation of the hash.
         """
-        return legacy_hash(obj, function_info_extractor=self.function_info_extractor)
+        return legacy_core.legacy_hash(
+            obj, function_info_extractor=self.function_info_extractor
+        )
diff --git a/src/orcapod/hashing/semantic_type_hashers.py b/src/orcapod/hashing/semantic_type_hashers.py
new file mode 100644
index 0000000..bcd489f
--- /dev/null
+++ b/src/orcapod/hashing/semantic_type_hashers.py
@@ -0,0 +1,97 @@
+from orcapod.protocols.hashing_protocols import (
+    SemanticTypeHasher,
+    FileContentHasher,
+    StringCacher,
+)
+import os
+import hashlib
+import pyarrow as pa
+
+
+class PathHasher(SemanticTypeHasher):
+    """Hasher for Path semantic type columns - hashes file contents."""
+
+    def __init__(
+        self,
+        file_hasher: FileContentHasher,
+        handle_missing: str = "error",
+        string_cacher: StringCacher | None = None,
+        cache_key_prefix: str = "path_hasher",
+    ):
+        """
+        Initialize PathHasher.
+
+        Args:
+            file_hasher: FileContentHasher used to hash each file's contents
+            handle_missing: How to handle missing files ('error', 'skip', 'null_hash')
+            string_cacher: Optional StringCacher used to cache computed file hashes
+            cache_key_prefix: Prefix used when building cache keys for the cacher
+        """
+        self.file_hasher = file_hasher
+        self.handle_missing = handle_missing
+        self.cacher = string_cacher
+        self.cache_key_prefix = cache_key_prefix
+
+    def _hash_file_content(self, file_path: str) -> bytes:
+        """Hash the content of a single file"""
+        import os
+
+        # if cacher exists, check if the hash is cached
+        if self.cacher:
+            cache_key = f"{self.cache_key_prefix}:{file_path}"
+            cached_hash_hex = self.cacher.get_cached(cache_key)
+            if cached_hash_hex is not None:
+                return bytes.fromhex(cached_hash_hex)
+
+        try:
+            if not os.path.exists(file_path):
+                if self.handle_missing == "error":
+                    raise FileNotFoundError(f"File not found: {file_path}")
+                elif self.handle_missing == "skip":
+                    return hashlib.sha256(b"").digest()
+                elif self.handle_missing == "null_hash":
+                    return hashlib.sha256(b"").digest()
+
+            hashed_value = self.file_hasher.hash_file(file_path)
+            if self.cacher:
+                # Cache the computed hash hex
+                self.cacher.set_cached(
+                    f"{self.cache_key_prefix}:{file_path}", hashed_value.hex()
+                )
+            return hashed_value
+
+        except (IOError, OSError, PermissionError) as e:
+            if self.handle_missing == "error":
+                raise IOError(f"Cannot read file {file_path}: {e}")
+            else:  # skip or null_hash
+                error_msg = f"UNREADABLE_FILE:{file_path}"
+                return hashlib.sha256(error_msg.encode("utf-8")).digest()
+
+    def hash_column(self, column: pa.Array) -> pa.Array:
+        """
+        Replace path column with file content hashes. 
+        Returns a new array where each path is replaced with its file content hash.
+        """
+
+        # Convert to python list for processing
+        paths = column.to_pylist()
+
+        # Hash each file's content individually
+        content_hashes = []
+        for path in paths:
+            if path is not None:
+                # Normalize path for consistency
+                normalized_path = os.path.normpath(str(path))
+                file_content_hash = self._hash_file_content(normalized_path)
+                content_hashes.append(file_content_hash)
+            else:
+                content_hashes.append(None)  # Preserve nulls
+
+        # Return new array with content hashes instead of paths
+        return pa.array(content_hashes)
+
+    def set_cacher(self, cacher: StringCacher) -> None:
+        """
+        Set the string cacher used to cache per-file content hashes.
+        Subsequent hash_column calls will consult this cacher before re-hashing file contents.
+        """
+        self.cacher = cacher
diff --git a/src/orcapod/hashing/string_cachers.py b/src/orcapod/hashing/string_cachers.py
index 9b2244a..bb09eff 100644
--- a/src/orcapod/hashing/string_cachers.py
+++ b/src/orcapod/hashing/string_cachers.py
@@ -6,17 +6,19 @@
 from pathlib import Path
 from typing import TYPE_CHECKING, Any, TYPE_CHECKING
 
-from orcapod.hashing.types import StringCacher
+from orcapod.protocols.hashing_protocols import StringCacher
 
 logger = logging.getLogger(__name__)
 
 if TYPE_CHECKING:
     import redis
 
+
 def _get_redis():
     """Lazy import for Redis to avoid circular dependencies."""
     try:
         import redis
+
         return redis
     except ImportError as e:
         return None
@@ -615,7 +617,9 @@ def __init__(
         # TODO: cleanup the redis use pattern
         self._redis_module = _get_redis()
         if self._redis_module is None:
-            raise ImportError("Could not import Redis module. redis package is required for RedisCacher")
+            raise ImportError(
+                "Could not import Redis module. 
redis package is required for RedisCacher" + ) self.key_prefix = key_prefix self._connection_failed = False self._lock = threading.RLock() @@ -658,7 +662,10 @@ def _test_connection(self) -> None: f"Redis connection established successfully with prefix '{self.key_prefix}'" ) - except (self._redis_module.RedisError, self._redis_module.ConnectionError) as e: + except ( + self._redis_module.RedisError, + self._redis_module.ConnectionError, + ) as e: logging.error(f"Failed to establish Redis connection: {e}") raise RuntimeError(f"Redis connection test failed: {e}") @@ -690,7 +697,10 @@ def get_cached(self, cache_key: str) -> str | None: return str(result) - except (self._redis_module.RedisError, self._redis_module.ConnectionError) as e: + except ( + self._redis_module.RedisError, + self._redis_module.ConnectionError, + ) as e: self._handle_redis_error("get", e) return None @@ -708,7 +718,10 @@ def set_cached(self, cache_key: str, value: str) -> None: self.redis.set(self._get_prefixed_key(cache_key), value) - except (self._redis_module.RedisError, self._redis_module.ConnectionError) as e: + except ( + self._redis_module.RedisError, + self._redis_module.ConnectionError, + ) as e: self._handle_redis_error("set", e) def clear_cache(self) -> None: @@ -722,7 +735,10 @@ def clear_cache(self) -> None: if keys: self.redis.delete(*list(keys)) # type: ignore[arg-type] - except (self._redis_module.RedisError, self._redis_module.ConnectionError) as e: + except ( + self._redis_module.RedisError, + self._redis_module.ConnectionError, + ) as e: self._handle_redis_error("clear", e) def is_connected(self) -> bool: diff --git a/src/orcapod/hashing/types.py b/src/orcapod/hashing/types.py index 5e8b07c..6306d94 100644 --- a/src/orcapod/hashing/types.py +++ b/src/orcapod/hashing/types.py @@ -5,7 +5,7 @@ from typing import Any, Protocol, runtime_checkable import uuid -from orcapod.types import Packet, PathLike, PathSet, TypeSpec +from orcapod.types import PacketLike, PathLike, PathSet, TypeSpec import pyarrow as pa @@ -29,6 +29,7 @@ def identity_structure(self) -> Any: class ObjectHasher(ABC): """Abstract class for general object hashing.""" + # TODO: consider more explicitly stating types of objects accepted @abstractmethod def hash(self, obj: Any) -> bytes: """ @@ -42,7 +43,15 @@ def hash(self, obj: Any) -> bytes: """ ... - def hash_to_hex(self, obj: Any, char_count: int | None = None) -> str: + @abstractmethod + def get_hasher_id(self) -> str: + """ + Returns a unique identifier/name assigned to the hasher + """ + + def hash_to_hex( + self, obj: Any, char_count: int | None = None, prefix_hasher_id: bool = False + ) -> str: hash_bytes = self.hash(obj) hex_str = hash_bytes.hex() @@ -52,7 +61,9 @@ def hash_to_hex(self, obj: Any, char_count: int | None = None) -> str: raise ValueError( f"Cannot truncate to {char_count} chars, hash only has {len(hex_str)}" ) - return hex_str[:char_count] + hex_str = hex_str[:char_count] + if prefix_hasher_id: + hex_str = self.get_hasher_id() + "@" + hex_str return hex_str def hash_to_int(self, obj: Any, hexdigits: int = 16) -> int: @@ -73,42 +84,23 @@ def hash_to_uuid( self, obj: Any, namespace: uuid.UUID = uuid.NAMESPACE_OID ) -> uuid.UUID: """Convert hash to proper UUID5.""" - # Use the hex representation as input to UUID5 return uuid.uuid5(namespace, self.hash(obj)) @runtime_checkable -class FileHasher(Protocol): +class FileContentHasher(Protocol): """Protocol for file-related hashing.""" - def hash_file(self, file_path: PathLike) -> str: ... 
- - -# Higher-level operations that compose file hashing -@runtime_checkable -class PathSetHasher(Protocol): - """Protocol for hashing pathsets (files, directories, collections).""" - - def hash_pathset(self, pathset: PathSet) -> str: ... - - -@runtime_checkable -class SemanticHasher(Protocol): - pass - - -@runtime_checkable -class PacketHasher(Protocol): - """Protocol for hashing packets.""" - - def hash_packet(self, packet: Packet) -> str: ... + def hash_file(self, file_path: PathLike) -> bytes: ... @runtime_checkable class ArrowHasher(Protocol): """Protocol for hashing arrow packets.""" - def hash_table(self, table: pa.Table) -> str: ... + def get_hasher_id(self) -> str: ... + + def hash_table(self, table: pa.Table, prefix_hasher_id: bool = True) -> str: ... @runtime_checkable @@ -120,14 +112,6 @@ def set_cached(self, cache_key: str, value: str) -> None: ... def clear_cache(self) -> None: ... -# Combined interface for convenience (optional) -@runtime_checkable -class CompositeFileHasher(FileHasher, PathSetHasher, PacketHasher, Protocol): - """Combined interface for all file-related hashing operations.""" - - pass - - # Function hasher protocol @runtime_checkable class FunctionInfoExtractor(Protocol): @@ -137,6 +121,58 @@ def extract_function_info( self, func: Callable[..., Any], function_name: str | None = None, - input_types: TypeSpec | None = None, - output_types: TypeSpec | None = None, + input_typespec: TypeSpec | None = None, + output_typespec: TypeSpec | None = None, ) -> dict[str, Any]: ... + + +class SemanticTypeHasher(Protocol): + """Abstract base class for semantic type-specific hashers.""" + + @abstractmethod + def hash_column( + self, + column: pa.Array, + ) -> pa.Array: + """Hash a column with this semantic type and return the hash bytes.""" + pass + + @abstractmethod + def set_cacher(self, cacher: StringCacher) -> None: + """Add a string cacher for caching hash values.""" + pass + + +# ---------------Legacy implementations and protocols to be deprecated--------------------- + + +@runtime_checkable +class LegacyFileHasher(Protocol): + """Protocol for file-related hashing.""" + + def hash_file(self, file_path: PathLike) -> str: ... + + +# Higher-level operations that compose file hashing +@runtime_checkable +class LegacyPathSetHasher(Protocol): + """Protocol for hashing pathsets (files, directories, collections).""" + + def hash_pathset(self, pathset: PathSet) -> str: ... + + +@runtime_checkable +class LegacyPacketHasher(Protocol): + """Protocol for hashing packets.""" + + def hash_packet(self, packet: PacketLike) -> str: ... + + +# Combined interface for convenience (optional) +@runtime_checkable +class LegacyCompositeFileHasher( + LegacyFileHasher, LegacyPathSetHasher, LegacyPacketHasher, Protocol +): + """Combined interface for all file-related hashing operations.""" + + pass diff --git a/src/orcapod/hashing/versioned_hashers.py b/src/orcapod/hashing/versioned_hashers.py new file mode 100644 index 0000000..91b7931 --- /dev/null +++ b/src/orcapod/hashing/versioned_hashers.py @@ -0,0 +1,88 @@ +# A collection of versioned hashers that provide a "default" implementation of hashers. 
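# Editor's note (illustrative sketch, not part of the diff): versioned_hashers below maps a
# version string to a "_class"/"_config" object spec and materializes it with parse_objectspec,
# so callers can pin hashing behavior to a specific version. Assuming the orcapod package from
# this PR is installed, usage would look roughly like this:
#
#     from orcapod.hashing.versioned_hashers import (
#         get_versioned_object_hasher,
#         get_versioned_semantic_arrow_hasher,
#     )
#
#     arrow_hasher = get_versioned_semantic_arrow_hasher()   # defaults to CURRENT_VERSION ("v0.1")
#     object_hasher = get_versioned_object_hasher("v0.1")    # or request an explicit version
#     digest = object_hasher.hash_to_hex({"x": 1, "y": 2}, prefix_hasher_id=True)
#     # e.g. "object_v0.1@<hex digest>", given the configured hasher_id "object_v0.1"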
+from .arrow_hashers import SemanticArrowHasher +from orcapod.utils.object_spec import parse_objectspec +from orcapod.protocols.hashing_protocols import ObjectHasher + +CURRENT_VERSION = "v0.1" + +versioned_semantic_arrow_hashers = { + "v0.1": { + "_class": "orcapod.hashing.arrow_hashers.SemanticArrowHasher", + "_config": { + "hasher_id": "arrow_v0.1", + "hash_algorithm": "sha256", + "chunk_size": 8192, + "serialization_method": "logical", + "semantic_type_hashers": { + "path": { + "_class": "orcapod.hashing.semantic_type_hashers.PathHasher", + "_config": { + "file_hasher": { + "_class": "orcapod.hashing.file_hashers.BasicFileHasher", + "_config": { + "algorithm": "sha256", + }, + } + }, + } + }, + }, + } +} + +versioned_object_hashers = { + "v0.1": { + "_class": "orcapod.hashing.object_hashers.BasicObjectHasher", + "_config": { + "hasher_id": "object_v0.1", + "function_info_extractor": { + "_class": "orcapod.hashing.function_info_extractors.FunctionSignatureExtractor", + "_config": {"include_module": True, "include_defaults": True}, + }, + }, + } +} + + +def get_versioned_semantic_arrow_hasher( + version: str | None = None, +) -> SemanticArrowHasher: + """ + Get the versioned hasher for the specified version. + + Args: + version (str): The version of the hasher to retrieve. + + Returns: + ArrowHasher: An instance of the arrow hasher of the specified version. + """ + if version is None: + version = CURRENT_VERSION + + if version not in versioned_semantic_arrow_hashers: + raise ValueError(f"Unsupported hasher version: {version}") + + hasher_spec = versioned_semantic_arrow_hashers[version] + return parse_objectspec(hasher_spec) + + +def get_versioned_object_hasher( + version: str | None = None, +) -> ObjectHasher: + """ + Get an object hasher for the specified version. + + Args: + version (str): The version of the hasher to retrieve. + + Returns: + Object: An instance of the object hasher of the specified version. + """ + if version is None: + version = CURRENT_VERSION + + if version not in versioned_object_hashers: + raise ValueError(f"Unsupported hasher version: {version}") + + hasher_spec = versioned_object_hashers[version] + return parse_objectspec(hasher_spec) diff --git a/src/orcapod/pipeline/__init__.py b/src/orcapod/pipeline/__init__.py new file mode 100644 index 0000000..9d3e0f5 --- /dev/null +++ b/src/orcapod/pipeline/__init__.py @@ -0,0 +1,5 @@ +# from .legacy_pipeline import Pipeline + +# __all__ = [ +# "Pipeline", +# ] diff --git a/src/orcapod/pipeline/graph.py b/src/orcapod/pipeline/graph.py new file mode 100644 index 0000000..0ba9bf8 --- /dev/null +++ b/src/orcapod/pipeline/graph.py @@ -0,0 +1,142 @@ +from orcapod.data.trackers import GraphTracker, Invocation +from orcapod.pipeline.nodes import KernelNode, PodNode +from orcapod.data.context import DataContext +from orcapod.protocols import data_protocols as dp +from orcapod.protocols import store_protocols as sp +from typing import Any +from collections.abc import Collection +from orcapod.data.streams import WrappedStream +import logging + + +logger = logging.getLogger(__name__) + + +class Pipeline(GraphTracker): + """ + Represents a pipeline in the system. + This class extends GraphTracker to manage the execution of kernels and pods in a pipeline. 
+ """ + + def __init__( + self, + name: str | tuple[str, ...], + pipeline_store: sp.ArrowDataStore, + results_store: sp.ArrowDataStore | None = None, + tracker_manager: dp.TrackerManager | None = None, + data_context: str | DataContext | None = None, + auto_compile: bool = True, + ): + super().__init__(tracker_manager=tracker_manager, data_context=data_context) + if not isinstance(name, tuple): + name = (name,) + self.name = name + self.pipeline_store_path_prefix = self.name + self.results_store_path_prefix = () + if results_store is None: + if pipeline_store is None: + raise ValueError( + "Either pipeline_store or results_store must be provided" + ) + results_store = pipeline_store + self.results_store_path_prefix = self.name + ("_results",) + self.pipeline_store = pipeline_store + self.results_store = results_store + self.nodes = {} + self.auto_compile = auto_compile + self._dirty = False + self._ordered_nodes = [] # Track order of invocations + + def __exit__(self, exc_type=None, exc_value=None, traceback=None): + """ + Exit the pipeline context, ensuring all nodes are properly closed. + """ + super().__exit__(exc_type, exc_value, traceback) + if self.auto_compile: + self.compile() + + def flush(self) -> None: + self.pipeline_store.flush() + self.results_store.flush() + + def record_kernel_invocation( + self, + kernel: dp.Kernel, + upstreams: tuple[dp.Stream, ...], + label: str | None = None, + ) -> None: + super().record_kernel_invocation(kernel, upstreams, label) + self._dirty = True + + def record_pod_invocation( + self, + pod: dp.Pod, + upstreams: tuple[dp.Stream, ...], + label: str | None = None, + ) -> None: + super().record_pod_invocation(pod, upstreams, label) + self._dirty = True + + def compile(self) -> None: + import networkx as nx + + invocation_to_stream_lut = {} + G = self.generate_graph() + for invocation in nx.topological_sort(G): + input_streams = [ + invocation_to_stream_lut[parent] for parent in invocation.parents() + ] + node = self.wrap_invocation(invocation, new_input_streams=input_streams) + invocation_to_stream_lut[invocation] = node() + self.nodes[node.label] = node + + def wrap_invocation( + self, + invocation: Invocation, + new_input_streams: Collection[dp.Stream], + ) -> dp.Kernel: + if invocation in self.invocation_to_pod_lut: + pod = self.invocation_to_pod_lut[invocation] + node = PodNode( + pod=pod, + input_streams=new_input_streams, + result_store=self.results_store, + record_path_prefix=self.results_store_path_prefix, + pipeline_store=self.pipeline_store, + pipeline_path_prefix=self.pipeline_store_path_prefix, + label=invocation.label, + ) + else: + node = KernelNode( + kernel=invocation.kernel, + input_streams=new_input_streams, + pipeline_store=self.pipeline_store, + pipeline_path_prefix=self.pipeline_store_path_prefix, + label=invocation.label, + ) + return node + + def __getattr__(self, item: str) -> Any: + """Allow direct access to pipeline attributes.""" + if item in self.nodes: + return self.nodes[item] + raise AttributeError(f"Pipeline has no attribute '{item}'") + + def __dir__(self) -> list[str]: + """Return a list of attributes and methods of the pipeline.""" + return list(super().__dir__()) + list(self.nodes.keys()) + + def rename(self, old_name: str, new_name: str) -> None: + """ + Rename a node in the pipeline. + This will update the label and the internal mapping. 
+ """ + if old_name not in self.nodes: + raise KeyError(f"Node '{old_name}' does not exist in the pipeline.") + if new_name in self.nodes: + raise KeyError(f"Node '{new_name}' already exists in the pipeline.") + node = self.nodes[old_name] + del self.nodes[old_name] + node.label = new_name + self.nodes[new_name] = node + logger.info(f"Node '{old_name}' renamed to '{new_name}'") diff --git a/src/orcapod/pipeline/legacy_nodes.py b/src/orcapod/pipeline/legacy_nodes.py new file mode 100644 index 0000000..9470c1e --- /dev/null +++ b/src/orcapod/pipeline/legacy_nodes.py @@ -0,0 +1,817 @@ +from orcapod.core.pod import Pod, FunctionPod +from orcapod.core import SyncStream, Source, Kernel +from orcapod.core.streams import PolarsStream +from orcapod.core.streams import EmptyStream +from orcapod.stores import ArrowDataStore +from orcapod.types import Tag, Packet, PacketLike, TypeSpec, default_registry +from orcapod.types.legacy import packets +from orcapod.types.typespec_utils import union_typespecs +from orcapod.types.legacy.semantic_type_registry import SemanticTypeRegistry +from orcapod.types import schemas +from orcapod.hashing import ObjectHasher, ArrowHasher +from orcapod.hashing.defaults import get_default_object_hasher, get_default_arrow_hasher +from typing import Any, Literal +from collections.abc import Collection, Iterator +import polars as pl +from orcapod.core.streams import SyncStreamFromGenerator + +import logging + +logger = logging.getLogger(__name__) + + +def get_tag_typespec(tag: Tag) -> dict[str, type]: + return {k: str for k in tag} + + +class KernelInvocationWrapper(Kernel): + def __init__( + self, kernel: Kernel, input_streams: Collection[SyncStream], **kwargs + ) -> None: + super().__init__(**kwargs) + self.kernel = kernel + self.input_streams = list(input_streams) + + def __repr__(self) -> str: + return f"{self.__class__.__name__}<{self.kernel!r}>" + + def __str__(self) -> str: + return f"{self.__class__.__name__}<{self.kernel}>" + + def computed_label(self) -> str | None: + """ + Return the label of the wrapped kernel. + """ + return self.kernel.label + + def resolve_input_streams(self, *input_streams) -> Collection[SyncStream]: + if input_streams: + raise ValueError( + "Wrapped pod with specified streams cannot be invoked with additional streams" + ) + return self.input_streams + + def identity_structure(self, *streams: SyncStream) -> Any: + """ + Identity structure that includes the wrapped kernel's identity structure. + """ + resolved_streams = self.resolve_input_streams(*streams) + return self.kernel.identity_structure(*resolved_streams) + + def keys( + self, *streams: SyncStream, trigger_run: bool = False + ) -> tuple[Collection[str] | None, Collection[str] | None]: + resolved_streams = self.resolve_input_streams(*streams) + return self.kernel.keys(*resolved_streams, trigger_run=trigger_run) + + def types( + self, *streams: SyncStream, trigger_run: bool = False + ) -> tuple[TypeSpec | None, TypeSpec | None]: + resolved_streams = self.resolve_input_streams(*streams) + return self.kernel.types(*resolved_streams, trigger_run=trigger_run) + + def claims_unique_tags( + self, *streams: SyncStream, trigger_run: bool = False + ) -> bool | None: + resolved_streams = self.resolve_input_streams(*streams) + return self.kernel.claims_unique_tags( + *resolved_streams, trigger_run=trigger_run + ) + + def post_call(self, tag: Tag, packet: Packet) -> None: ... + + def output_iterator_completion_hook(self) -> None: ... 
+ + +class CachedKernelWrapper(KernelInvocationWrapper, Source): + """ + A Kernel wrapper that wraps a kernel and stores the outputs of the kernel. + If the class is instantiated with input_streams that is not None, then this wrapper + will strictly represent the invocation of the wrapped Kernel on the given input streams. + Passing in an empty list into input_streams would still be registered as a specific invocation. + If input_streams is None, the class instance largely acts as a proxy of the underlying kernel + but will try to save all results. Note that depending on the storage type passed in, the saving + may error out if you invoke the instance on input streams with non-compatible schema (e.g., tags with + different keys). + """ + + def __init__( + self, + kernel: Kernel, + input_streams: Collection[SyncStream], + output_store: ArrowDataStore, + store_path_prefix: tuple[str, ...] = (), + kernel_hasher: ObjectHasher | None = None, + arrow_packet_hasher: ArrowHasher | None = None, + packet_type_registry: SemanticTypeRegistry | None = None, + **kwargs, + ) -> None: + super().__init__(kernel, input_streams, **kwargs) + + self.output_store = output_store + self.store_path_prefix = store_path_prefix + + # These are configurable but are not expected to be modified except for special circumstances + if kernel_hasher is None: + kernel_hasher = get_default_object_hasher() + self._kernel_hasher = kernel_hasher + if arrow_packet_hasher is None: + arrow_packet_hasher = get_default_arrow_hasher() + self._arrow_packet_hasher = arrow_packet_hasher + if packet_type_registry is None: + packet_type_registry = default_registry + self._packet_type_registry = packet_type_registry + + self.update_cached_values() + + self._cache_computed = False + + @property + def arrow_hasher(self): + return self._arrow_packet_hasher + + @property + def registry(self): + return self._packet_type_registry + + @property + def kernel_hasher(self) -> ObjectHasher: + if self._kernel_hasher is None: + return get_default_object_hasher() + return self._kernel_hasher + + @kernel_hasher.setter + def kernel_hasher(self, kernel_hasher: ObjectHasher | None = None): + if kernel_hasher is None: + kernel_hasher = get_default_object_hasher() + self._kernel_hasher = kernel_hasher + # hasher changed -- trigger recomputation of properties that depend on kernel hasher + self.update_cached_values() + + @property + def source_info(self) -> tuple[str, ...]: + """ + Returns a tuple of (label, kernel_hash) that uniquely identifies the source of the cached outputs. + This is used to store and retrieve the outputs from the output store. + """ + return self.label, self.kernel_hasher.hash_to_hex( + self.kernel, prefix_hasher_id=True + ) + + @property + def store_path(self) -> tuple[str, ...]: + """ + Returns the path prefix for the output store. + This is used to store and retrieve the outputs from the output store. + """ + return self.store_path_prefix + self.source_info + + def update_cached_values(self): + self.kernel_hash = self.kernel_hasher.hash_to_hex( + self.kernel, prefix_hasher_id=True + ) + tag_keys, packet_keys = self.keys(trigger_run=False) + self.tag_keys = tuple(tag_keys) if tag_keys is not None else None + self.packet_keys = tuple(packet_keys) if packet_keys is not None else None + + self.tag_typespec, self.packet_typespec = self.types(trigger_run=False) + if self.tag_typespec is None or self.packet_typespec is None: + raise ValueError( + "Currently, cached kernel wrapper can only work with kernels that have typespecs defined." 
+ ) + # TODO: clean up and make it unnecessary to convert packet typespec + packet_schema = schemas.PythonSchema(self.packet_typespec) + joined_typespec = union_typespecs( + self.tag_typespec, packet_schema.with_source_info + ) + if joined_typespec is None: + raise ValueError( + "Joined typespec should not be None. " + "This may happen if the tag typespec and packet typespec are incompatible." + ) + # Add any additional fields to the output converter here + self.output_converter = packets.PacketConverter( + joined_typespec, registry=self.registry, include_source_info=False + ) + + def forward(self, *streams: SyncStream, **kwargs) -> SyncStream: + if self._cache_computed: + logger.info(f"Returning cached outputs for {self}") + if (lazy_df := self.get_all_records_as_polars(flush=False)) is not None: + if self.tag_keys is None: + raise ValueError( + "CachedKernelWrapper has no tag keys defined, cannot return PolarsStream" + ) + return PolarsStream( + lazy_df.collect(), + tag_keys=self.tag_keys, + packet_keys=self.packet_keys, + ) + else: + return EmptyStream(tag_keys=self.tag_keys, packet_keys=self.packet_keys) + + resolved_streams = self.resolve_input_streams(*streams) + output_stream = self.kernel.forward(*resolved_streams, **kwargs) + + # Cache the output stream of the underlying kernel + # If an entry with same tag and packet already exists in the output store, + # it will not be added again, thus avoiding duplicates. + def generator() -> Iterator[tuple[Tag, Packet]]: + logger.info(f"Computing and caching outputs for {self}") + for tag, packet in output_stream: + self.post_call(tag, packet) + yield tag, packet + self.output_iterator_completion_hook() + + logger.info(f"Results cached for {self}") + self._cache_computed = True + + return SyncStreamFromGenerator(generator) + + def post_call(self, tag: Tag, packet: Packet) -> None: + # Cache the output stream of the underlying kernel + # If an entry with same tag and packet already exists in the output store, + # it will not be added again, thus avoiding duplicates. + merged_info = {**tag, **packet.get_composite()} + output_table = self.output_converter.from_python_packet_to_arrow_table( + merged_info + ) + # TODO: revisit this logic + output_id = self.arrow_hasher.hash_table(output_table, prefix_hasher_id=True) + if not self.output_store.get_record(self.store_path, output_id, flush=False): + self.output_store.add_record( + self.store_path, + output_id, + output_table, + ) + + def output_iterator_completion_hook(self) -> None: + """ + Hook to be called when the generator is completed. 
+ """ + logger.info(f"Results cached for {self}") + self._cache_computed = True + + def get_all_records_as_polars(self, flush: bool = True) -> pl.LazyFrame | None: + return self.output_store.get_all_records_as_polars(self.store_path, flush=flush) + + @property + def lazy_df(self) -> pl.LazyFrame | None: + lazydf = self.output_store.get_all_records_as_polars(self.store_path) + if lazydf is None: + return None + if self.tag_keys is None or self.packet_keys is None: + raise ValueError( + "CachedKernelWrapper has no tag keys or packet keys defined, and currently this is not supported" + ) + return lazydf.select(self.tag_keys + self.packet_keys) + + @property + def df(self) -> pl.DataFrame | None: + lazy_df = self.lazy_df + if lazy_df is None: + return None + return lazy_df.collect() + + def reset_cache(self): + self._cache_computed = False + + +class FunctionPodInvocationWrapper(KernelInvocationWrapper, Pod): + """ + Convenience class to wrap a function pod, providing default pass-through + implementations + """ + + def __init__( + self, function_pod: FunctionPod, input_streams: Collection[SyncStream], **kwargs + ): + # note that this would be an alias to the self.kernel but here explicitly taken as function_pod + # for better type hints + # MRO will be KernelInvocationWrapper -> Pod -> Kernel + super().__init__(function_pod, input_streams, **kwargs) + self.function_pod = function_pod + + def forward(self, *streams: SyncStream, **kwargs) -> SyncStream: + resolved_streams = self.resolve_input_streams(*streams) + return super().forward(*resolved_streams, **kwargs) + + def call(self, tag: Tag, packet: Packet) -> tuple[Tag, Packet | None]: + return self.function_pod.call(tag, packet) + + # =============pass through methods/properties to the underlying function pod============= + + def set_active(self, active=True): + """ + Set the active state of the function pod. + """ + self.function_pod.set_active(active) + + def is_active(self) -> bool: + """ + Check if the function pod is active. + """ + return self.function_pod.is_active() + + +class CachedFunctionPodWrapper(FunctionPodInvocationWrapper, Source): + def __init__( + self, + function_pod: FunctionPod, + input_streams: Collection[SyncStream], + output_store: ArrowDataStore, + tag_store: ArrowDataStore | None = None, + label: str | None = None, + store_path_prefix: tuple[str, ...] = (), + output_store_path_prefix: tuple[str, ...] = (), + tag_store_path_prefix: tuple[str, ...] 
= (), + skip_memoization_lookup: bool = False, + skip_memoization: bool = False, + skip_tag_record: bool = False, + error_handling: Literal["raise", "ignore", "warn"] = "raise", + object_hasher: ObjectHasher | None = None, + arrow_hasher: ArrowHasher | None = None, + registry: SemanticTypeRegistry | None = None, + **kwargs, + ) -> None: + super().__init__( + function_pod, + input_streams, + label=label, + error_handling=error_handling, + **kwargs, + ) + self.output_store_path_prefix = store_path_prefix + output_store_path_prefix + self.tag_store_path_prefix = store_path_prefix + tag_store_path_prefix + + self.output_store = output_store + self.tag_store = tag_store + + self.skip_memoization_lookup = skip_memoization_lookup + self.skip_memoization = skip_memoization + self.skip_tag_record = skip_tag_record + + # These are configurable but are not expected to be modified except for special circumstances + # Here I'm assigning to the hidden properties directly to avoid triggering setters + if object_hasher is None: + object_hasher = get_default_object_hasher() + self._object_hasher = object_hasher + if arrow_hasher is None: + arrow_hasher = get_default_arrow_hasher() + self._arrow_hasher = arrow_hasher + if registry is None: + registry = default_registry + self._registry = registry + + # compute and cache properties and converters for efficiency + self.update_cached_values() + self._cache_computed = False + + @property + def tag_keys(self) -> tuple[str, ...]: + if self._tag_keys is None: + raise ValueError("Tag keys are not set, cannot return tag keys") + return self._tag_keys + + @property + def output_keys(self) -> tuple[str, ...]: + if self._output_keys is None: + raise ValueError("Output keys are not set, cannot return output keys") + return self._output_keys + + @property + def object_hasher(self) -> ObjectHasher: + return self._object_hasher + + @object_hasher.setter + def object_hasher(self, object_hasher: ObjectHasher | None = None): + if object_hasher is None: + object_hasher = get_default_object_hasher() + self._object_hasher = object_hasher + # hasher changed -- trigger recomputation of properties that depend on object hasher + self.update_cached_values() + + @property + def arrow_hasher(self) -> ArrowHasher: + return self._arrow_hasher + + @arrow_hasher.setter + def arrow_hasher(self, arrow_hasher: ArrowHasher | None = None): + if arrow_hasher is None: + arrow_hasher = get_default_arrow_hasher() + self._arrow_hasher = arrow_hasher + # hasher changed -- trigger recomputation of properties that depend on arrow hasher + self.update_cached_values() + + @property + def registry(self) -> SemanticTypeRegistry: + return self._registry + + @registry.setter + def registry(self, registry: SemanticTypeRegistry | None = None): + if registry is None: + registry = default_registry + self._registry = registry + # registry changed -- trigger recomputation of properties that depend on registry + self.update_cached_values() + + def update_cached_values(self) -> None: + self.function_pod_hash = self.object_hasher.hash_to_hex( + self.function_pod, prefix_hasher_id=True + ) + self.node_hash = self.object_hasher.hash_to_hex(self, prefix_hasher_id=True) + self.input_typespec, self.output_typespec = ( + self.function_pod.get_function_typespecs() + ) + tag_keys, output_keys = self.keys(trigger_run=False) + + if tag_keys is None or output_keys is None: + raise ValueError( + "Currently, cached function pod wrapper can only work with function pods that have keys defined." 
+ ) + self._tag_keys = tuple(tag_keys) + self._output_keys = tuple(output_keys) + + self.tag_typespec, self.output_typespec = self.types(trigger_run=False) + if self.tag_typespec is None or self.output_typespec is None: + raise ValueError( + "Currently, cached function pod wrapper can only work with function pods that have typespecs defined." + ) + self.input_typespec, self.output_typespec = ( + self.function_pod.get_function_typespecs() + ) + + self.input_converter = packets.PacketConverter( + self.input_typespec, self.registry, include_source_info=False + ) + self.output_converter = packets.PacketConverter( + self.output_typespec, self.registry, include_source_info=True + ) + + input_packet_source_typespec = { + f"_source_info_{k}": str for k in self.input_typespec + } + + # prepare typespec for tag record: __packet_key, tag, input packet source_info, + tag_record_typespec = { + "__packet_key": str, + **self.tag_typespec, + **input_packet_source_typespec, + } + self.tag_record_converter = packets.PacketConverter( + tag_record_typespec, self.registry, include_source_info=False + ) + + def reset_cache(self): + self._cache_computed = False + + def generator_completion_hook(self, n_computed: int) -> None: + """ + Hook to be called when the generator is completed. + """ + logger.info(f"Results cached for {self}") + self._cache_computed = True + + def forward(self, *streams: SyncStream, **kwargs) -> SyncStream: + if self._cache_computed: + logger.info(f"Returning cached outputs for {self}") + lazy_df = self.get_all_entries_with_tags(keep_hidden_fields=True) + if lazy_df is not None: + if self.tag_keys is None: + raise ValueError("Tag keys are not set, cannot return PolarsStream") + return PolarsStream( + lazy_df.collect(), self.tag_keys, packet_keys=self.output_keys + ) + else: + return EmptyStream(tag_keys=self.tag_keys, packet_keys=self.output_keys) + logger.info(f"Computing and caching outputs for {self}") + return super().forward(*streams, **kwargs) + + def get_packet_key(self, packet: Packet) -> str: + return self.arrow_hasher.hash_table( + self.input_converter.from_python_packet_to_arrow_table(packet), + prefix_hasher_id=True, + ) + + @property + def pod_source_info(self): + return self.function_pod.function_name, self.function_pod_hash + + @property + def node_source_info(self): + return self.label, self.node_hash + + @property + def output_store_path(self) -> tuple[str, ...]: + """ + Returns the path prefix for the output store. + This is used to store and retrieve the outputs from the output store. + """ + return self.output_store_path_prefix + self.pod_source_info + + @property + def tag_store_path(self) -> tuple[str, ...]: + """ + Returns the path prefix for the tag store. + This is used to store and retrieve the tags associated with memoized packets. + """ + return self.tag_store_path_prefix + self.node_source_info + + def is_memoized(self, packet: Packet) -> bool: + return self.retrieve_memoized(packet) is not None + + def add_pipeline_record(self, tag: Tag, packet: Packet) -> Tag: + """ + Record the tag for the packet in the record store. + This is used to keep track of the tags associated with memoized packets. 
+ """ + return self._add_pipeline_record_with_packet_key( + tag, self.get_packet_key(packet), packet.source_info + ) + + def _add_pipeline_record_with_packet_key( + self, tag: Tag, packet_key: str, packet_source_info: dict[str, str | None] + ) -> Tag: + if self.tag_store is None: + raise ValueError("Recording of tag requires tag_store but none provided") + + combined_info = dict(tag) # ensure we don't modify the original tag + combined_info["__packet_key"] = packet_key + for k, v in packet_source_info.items(): + combined_info[f"_source_info_{k}"] = v + + table = self.tag_record_converter.from_python_packet_to_arrow_table( + combined_info + ) + + entry_hash = self.arrow_hasher.hash_table(table, prefix_hasher_id=True) + + # TODO: add error handling + # check if record already exists: + retrieved_table = self.tag_store.get_record( + self.tag_store_path, entry_hash, flush=False + ) + if retrieved_table is None: + self.tag_store.add_record(self.tag_store_path, entry_hash, table) + + return tag + + def retrieve_memoized(self, packet: Packet) -> Packet | None: + """ + Retrieve a memoized packet from the data store. + Returns None if no memoized packet is found. + """ + logger.debug("Retrieving memoized packet") + return self._retrieve_memoized_with_packet_key(self.get_packet_key(packet)) + + def _retrieve_memoized_with_packet_key(self, packet_key: str) -> Packet | None: + """ + Retrieve a memoized result packet from the data store, looking up by the packet key + Returns None if no memoized packet is found. + """ + logger.debug(f"Retrieving memoized packet with key {packet_key}") + arrow_table = self.output_store.get_record( + self.output_store_path, + packet_key, + flush=False, + ) + if arrow_table is None: + return None + packets = self.output_converter.from_arrow_table_to_python_packets(arrow_table) + # since memoizing single packet, it should only contain one packet + assert len(packets) == 1, ( + f"Memoizing single packet return {len(packets)} packets!" + ) + return packets[0] + + def memoize( + self, + packet: Packet, + output_packet: Packet, + ) -> Packet: + """ + Memoize the output packet in the data store. + Returns the memoized packet. + """ + logger.debug("Memoizing packet") + return self._memoize_with_packet_key( + self.get_packet_key(packet), output_packet.get_composite() + ) + + def _memoize_with_packet_key( + self, packet_key: str, output_packet: PacketLike + ) -> Packet: + """ + Memoize the output packet in the data store, looking up by packet key. + Returns the memoized packet. + """ + logger.debug(f"Memoizing packet with key {packet_key}") + # TODO: this logic goes through the entire store and retrieve cycle with two conversions + # consider simpler alternative + packets = self.output_converter.from_arrow_table_to_python_packets( + self.output_store.add_record( + self.output_store_path, + packet_key, + self.output_converter.from_python_packet_to_arrow_table(output_packet), + ) + ) + # since passed in a single packet, it should only return a single packet + assert len(packets) == 1, ( + f"Memoizing single packet returned {len(packets)} packets!" 
+ ) + packet = packets[0] + # TODO: reconsider the right place to attach this information + # attach provenance information + return Packet(packet) + + def call(self, tag: Tag, packet: Packet) -> tuple[Tag, Packet | None]: + packet_key = "" + if ( + not self.skip_tag_record + or not self.skip_memoization_lookup + or not self.skip_memoization + ): + packet_key = self.get_packet_key(packet) + + output_packet = None + if not self.skip_memoization_lookup: + output_packet = self._retrieve_memoized_with_packet_key( + packet_key, + ) + if output_packet is not None: + logger.debug( + f"Memoized output for {packet} with {packet_key} found, skipping computation" + ) + else: + logger.debug( + f"Memoized output for packet {packet} with {packet_key} not found" + ) + + if output_packet is None: + # TODO: revisit the logic around active state and how to use it + tag, output_packet = self.function_pod.call(tag, packet) + if output_packet is not None and not self.skip_memoization: + # output packet may be modified by the memoization process + # e.g. if the output is a file, the path may be changed + # add source info to the output packet + source_info = { + k: "-".join(self.pod_source_info) + "-" + packet_key + ":" + str(k) + for k in output_packet.source_info + } + # TODO: fix and make this not access protected field directly + output_packet.source_info = source_info + output_packet = self._memoize_with_packet_key(packet_key, output_packet) # type: ignore + + if output_packet is None: + if self.is_active(): + logger.warning( + f"Function pod {self.function_pod.function_name} returned None for packet {packet} despite being active" + ) + return tag, None + + # result was successfully computed/retrieved -- save the tag + if not self.skip_tag_record and self.tag_store is not None: + self._add_pipeline_record_with_packet_key( + tag, packet_key, packet.source_info + ) + + return tag, output_packet + + def get_all_outputs(self) -> pl.LazyFrame | None: + return self.output_store.get_all_records_as_polars(self.output_store_path) + + def get_all_tags(self, with_packet_id: bool = False) -> pl.LazyFrame | None: + if self.tag_store is None: + raise ValueError("Tag store is not set, no tag record can be retrieved") + data = self.tag_store.get_all_records_as_polars(self.tag_store_path) + if not with_packet_id: + return data.drop("__packet_key") if data is not None else None + return data + + def get_all_entries_with_tags( + self, keep_hidden_fields: bool = False + ) -> pl.LazyFrame | None: + """ + Retrieve all entries from the tag store with their associated tags. + Returns a DataFrame with columns for tag and packet key. 
+ """ + if self.tag_store is None: + raise ValueError("Tag store is not set, no tag record can be retrieved") + + tag_records = self.tag_store.get_all_records_as_polars(self.tag_store_path) + if tag_records is None: + return None + result_packets = self.output_store.get_records_by_ids_as_polars( + self.output_store_path, + tag_records.collect()["__packet_key"], + preserve_input_order=True, + ) + if result_packets is None: + return None + + pl_df = pl.concat([tag_records, result_packets], how="horizontal").drop( + ["__packet_key"] + ) + if not keep_hidden_fields: + pl_df = pl_df.select(self.tag_keys + self.output_keys) + return pl_df.lazy() + + @property + def df(self) -> pl.DataFrame | None: + lazy_df = self.lazy_df + if lazy_df is None: + return None + return lazy_df.collect() + + @property + def lazy_df(self) -> pl.LazyFrame | None: + return self.get_all_entries_with_tags() + + @property + def tags(self) -> pl.DataFrame | None: + data = self.get_all_tags() + if data is None: + return None + + return data.collect() + + @property + def outputs(self) -> pl.DataFrame | None: + """ + Retrieve all outputs from the result store as a DataFrame. + Returns None if no outputs are available. + """ + data = self.get_all_outputs() + if data is None: + return None + + return data.collect() + + +class DummyFunctionPod(Pod): + def __init__(self, function_name="dummy", **kwargs): + super().__init__(**kwargs) + self.function_name = function_name + + def set_active(self, active: bool = True): + # no-op + pass + + def is_active(self) -> bool: + return False + + def call(self, tag: Tag, packet: Packet) -> tuple[Tag, Packet | None]: + raise NotImplementedError( + "DummyFunctionPod cannot be called, it is only used to access previously stored tags and outputs." + ) + + +# TODO: Create this instead using compositional pattern +class DummyCachedFunctionPod(CachedFunctionPodWrapper): + """ + Dummy for a cached function pod. This is convenient to just allow the user to access + previously stored function pod tags and outputs without requiring instantiating the identical + function used for computation. + + Consequently, this function pod CANNOT be used to compute and insert new entries into the storage. + """ + + def __init__(self, source_pod: CachedFunctionPodWrapper): + self._pod_source_info = source_pod.pod_source_info + self._node_source_info = source_pod.node_source_info + self.output_store = source_pod.output_store + self.tag_store = source_pod.tag_store + self.function_pod = DummyFunctionPod(source_pod.function_pod.function_name) + + @property + def pod_source_info(self) -> tuple[str, str]: + return self._pod_source_info + + @property + def node_source_info(self) -> tuple[str, str]: + return self._node_source_info + + +class Node(KernelInvocationWrapper, Source): + def __init__(self, kernel: Kernel, input_nodes: Collection["Node"], **kwargs): + """ + Create a node that wraps a kernel and provides a Node interface. + This is useful for creating nodes in a pipeline that can be executed. + """ + return super().__init__(kernel, input_nodes, **kwargs) + + def reset_cache(self) -> None: ... + + +class KernelNode(CachedKernelWrapper, Node): + """ + A node that wraps a Kernel and provides a Node interface. + This is useful for creating nodes in a pipeline that can be executed. + """ + + +class FunctionPodNode(CachedFunctionPodWrapper, Node): + """ + A node that wraps a FunctionPod and provides a Node interface. + This is useful for creating nodes in a pipeline that can be executed. 
+ """ diff --git a/src/orcapod/pipeline/legacy_pipeline.py b/src/orcapod/pipeline/legacy_pipeline.py new file mode 100644 index 0000000..8c931f7 --- /dev/null +++ b/src/orcapod/pipeline/legacy_pipeline.py @@ -0,0 +1,257 @@ +from collections import defaultdict +from collections.abc import Collection +import logging +import pickle +import sys +import time +from pathlib import Path +from typing import Any + + +from orcapod.core import Invocation, Kernel, SyncStream +from orcapod.core.pod import FunctionPod +from orcapod.pipeline.legacy_nodes import KernelNode, FunctionPodNode, Node + +from orcapod.core.tracker import GraphTracker +from orcapod.stores import ArrowDataStore + +logger = logging.getLogger(__name__) + + +class SerializationError(Exception): + """Raised when pipeline cannot be serialized""" + + pass + + +class Pipeline(GraphTracker): + """ + Enhanced pipeline that tracks operations and provides queryable views. + Replaces the old Tracker with better persistence and view capabilities. + """ + + def __init__( + self, + name: str | tuple[str, ...], + pipeline_store: ArrowDataStore, + results_store: ArrowDataStore | None = None, + auto_compile: bool = True, + ) -> None: + super().__init__() + if not isinstance(name, tuple): + name = (name,) + self.name = name + self.pipeline_store_path_prefix = self.name + self.results_store_path_prefix = () + if results_store is None: + if pipeline_store is None: + raise ValueError( + "Either pipeline_store or results_store must be provided" + ) + results_store = pipeline_store + self.results_store_path_prefix = self.name + ("_results",) + + self.pipeline_store = pipeline_store + self.results_store = results_store + self.nodes = {} + self.auto_compile = auto_compile + self._dirty = False + self._ordered_nodes = [] # Track order of invocations + + # Core Pipeline Operations + def save(self, path: Path | str) -> None: + """Save complete pipeline state - named functions only""" + path = Path(path) + + # Validate serializability first + self._validate_serializable() + + state = { + "name": self.name, + "invocation_lut": self.invocation_lut, + "metadata": { + "created_at": time.time(), + "python_version": sys.version_info[:2], + "orcapod_version": "0.1.0", # TODO: make this dynamic + }, + } + + # Atomic write + temp_path = path.with_suffix(".tmp") + try: + with open(temp_path, "wb") as f: + pickle.dump(state, f, protocol=pickle.HIGHEST_PROTOCOL) + temp_path.replace(path) + logger.info(f"Pipeline '{self.name}' saved to {path}") + except Exception: + if temp_path.exists(): + temp_path.unlink() + raise + + def flush(self) -> None: + """Flush all pending writes to the data store""" + self.pipeline_store.flush() + self.results_store.flush() + logger.info("Pipeline stores flushed") + + def record(self, invocation: Invocation) -> None: + """ + Record an invocation in the pipeline. + This method is called automatically by the Kernel when an operation is invoked. 
+ """ + super().record(invocation) + self._dirty = True + + def wrap_invocation( + self, kernel: Kernel, input_nodes: Collection[Node], label: str | None = None + ) -> Node: + if isinstance(kernel, FunctionPod): + return FunctionPodNode( + kernel, + input_nodes, + output_store=self.results_store, + tag_store=self.pipeline_store, + output_store_path_prefix=self.results_store_path_prefix, + tag_store_path_prefix=self.pipeline_store_path_prefix, + label=label, + ) + return KernelNode( + kernel, + input_nodes, + output_store=self.pipeline_store, + store_path_prefix=self.pipeline_store_path_prefix, + label=label, + ) + + def compile(self): + import networkx as nx + + G = self.generate_graph() + + # Proposed labels for each Kernel in the graph + # If name collides, unique name is generated by appending an index + proposed_labels = defaultdict(list) + node_lut = {} + edge_lut: dict[SyncStream, Node] = {} + ordered_nodes = [] + for invocation in nx.topological_sort(G): + # map streams to the new streams based on Nodes + input_nodes = [edge_lut[stream] for stream in invocation.streams] + label = None + if invocation.has_assigned_label: + # If the invocation has a label, use it directly + label = invocation.label + new_node = self.wrap_invocation(invocation.kernel, input_nodes, label=label) + + # register the new node against the original invocation + node_lut[invocation] = new_node + ordered_nodes.append(new_node) + # register the new node in the proposed labels -- if duplicates occur, will resolve later + proposed_labels[new_node.label].append(new_node) + + for edge in G.out_edges(invocation): + edge_lut[G.edges[edge]["stream"]] = new_node + + self._ordered_nodes = ordered_nodes + + # resolve duplicates in proposed_labels + labels_to_nodes = {} + for label, nodes in proposed_labels.items(): + if len(nodes) > 1: + # If multiple nodes have the same label, append index to make it unique + for idx, node in enumerate(nodes): + node.label = f"{label}_{idx}" + labels_to_nodes[node.label] = node + else: + # If only one node, keep the original label + nodes[0].label = label + labels_to_nodes[label] = nodes[0] + + # store as pipeline's nodes attribute + self.nodes = labels_to_nodes + self._dirty = False + return node_lut, edge_lut, proposed_labels, labels_to_nodes + + def __exit__(self, exc_type, exc_val, ext_tb): + super().__exit__(exc_type, exc_val, ext_tb) + if self.auto_compile: + self.compile() + + def __getattr__(self, item: str) -> Any: + """Allow direct access to pipeline attributes""" + if item in self.nodes: + return self.nodes[item] + raise AttributeError(f"Pipeline has no attribute '{item}'") + + def __dir__(self): + # Include both regular attributes and dynamic ones + return list(super().__dir__()) + list(self.nodes.keys()) + + def rename(self, old_name: str, new_name: str) -> None: + """ + Rename a node in the pipeline. + This will update the label and the internal mapping. + """ + if old_name not in self.nodes: + raise KeyError(f"Node '{old_name}' does not exist in the pipeline.") + if new_name in self.nodes: + raise KeyError(f"Node '{new_name}' already exists in the pipeline.") + node = self.nodes[old_name] + del self.nodes[old_name] + node.label = new_name + self.nodes[new_name] = node + logger.info(f"Node '{old_name}' renamed to '{new_name}'") + + def run(self, full_sync: bool = False) -> None: + """ + Run the pipeline, compiling it if necessary. + This method is a no-op if auto_compile is False. 
+ """ + if self.auto_compile and self._dirty: + self.compile() + + # Run in topological order + for node in self._ordered_nodes: + if full_sync: + node.reset_cache() + node.flow() + + self.flush() + + @classmethod + def load(cls, path: Path | str) -> "Pipeline": + """Load complete pipeline state""" + path = Path(path) + + with open(path, "rb") as f: + state = pickle.load(f) + + pipeline = cls(state["name"], state["output_store"]) + pipeline.invocation_lut = state["invocation_lut"] + + logger.info(f"Pipeline '{pipeline.name}' loaded from {path}") + return pipeline + + def _validate_serializable(self) -> None: + """Ensure pipeline contains only serializable operations""" + issues = [] + + for operation, invocations in self.invocation_lut.items(): + # Check for lambda functions + if hasattr(operation, "function"): + func = getattr(operation, "function", None) + if func and hasattr(func, "__name__") and func.__name__ == "": + issues.append(f"Lambda function in {operation.__class__.__name__}") + + # Test actual serializability + try: + pickle.dumps(operation) + except Exception as e: + issues.append(f"Non-serializable operation {operation}: {e}") + + if issues: + raise SerializationError( + "Pipeline contains non-serializable elements:\n" + + "\n".join(f" - {issue}" for issue in issues) + + "\n\nOnly named functions are supported for serialization." + ) diff --git a/src/orcapod/pipeline/nodes.py b/src/orcapod/pipeline/nodes.py new file mode 100644 index 0000000..7372175 --- /dev/null +++ b/src/orcapod/pipeline/nodes.py @@ -0,0 +1,304 @@ +from ast import Not +from collections.abc import Collection, Iterator +from datetime import datetime +from orcapod.data.kernels import WrappedKernel, TrackedKernelBase +from orcapod.data.pods import ArrowDataStore, CachedPod +from orcapod.protocols import data_protocols as dp +from orcapod.data.streams import PodStream +from orcapod.types import TypeSpec +from orcapod.utils.lazy_module import LazyModule +from typing import TYPE_CHECKING, Any +from orcapod.data.system_constants import orcapod_constants as constants +from orcapod.utils import arrow_utils + +if TYPE_CHECKING: + import pyarrow as pa +else: + pa = LazyModule("pyarrow") + + +class Node( + TrackedKernelBase, +): + """ + Mixin class for pipeline nodes + """ + + def __init__( + self, + input_streams: Collection[dp.Stream], + pipeline_store: ArrowDataStore, + pipeline_path_prefix: tuple[str, ...] = (), + **kwargs, + ): + super().__init__(**kwargs) + self._cached_stream: dp.LiveStream | None = None + self.input_streams = tuple(input_streams) + self.pipeline_store = pipeline_store + self.pipeline_path_prefix = pipeline_path_prefix + # compute invocation hash - note that empty () is passed into identity_structure to signify + # identity structure of invocation with no input streams + self.invocation_hash = self.data_context.object_hasher.hash_to_hex( + self.identity_structure(()), prefix_hasher_id=True + ) + + @property + def pipeline_path(self) -> tuple[str, ...]: + """ + Return the path to the pipeline run records. + This is used to store the run-associated tag info. + """ + return self.pipeline_path_prefix + self.kernel_id + (self.invocation_hash,) + + def validate_inputs(self, *processed_streams: dp.Stream) -> None: + pass + + def forward(self, *streams: dp.Stream) -> dp.Stream: + if len(streams) > 0: + raise NotImplementedError( + "At this moment, Node does not yet support handling additional input streams." 
+ ) + # TODO: re-evaluate the use here + # super().validate_inputs(*self.input_streams) + return super().forward(*self.input_streams) + + def __call__(self, *args, **kwargs) -> dp.LiveStream: + if self._cached_stream is None: + self._cached_stream = super().__call__(*args, **kwargs) + return self._cached_stream + + # properties and methods to act as a dp.Stream + @property + def source(self) -> dp.Kernel | None: + return self + + @property + def upstreams(self) -> tuple[dp.Stream, ...]: + return () + + def keys(self) -> tuple[tuple[str, ...], tuple[str, ...]]: + return self().keys() + + def types(self) -> tuple[TypeSpec, TypeSpec]: + return self().types() + + @property + def last_modified(self) -> datetime | None: + return self().last_modified + + @property + def is_current(self) -> bool: + return self().is_current + + def __iter__(self) -> Iterator[tuple[dp.Tag, dp.Packet]]: + return self().__iter__() + + def iter_packets(self) -> Iterator[tuple[dp.Tag, dp.Packet]]: + return self().iter_packets() + + def as_table( + self, + include_data_context: bool = False, + include_source: bool = False, + include_content_hash: bool | str = False, + ) -> "pa.Table": + return self().as_table( + include_data_context=include_data_context, + include_source=include_source, + include_content_hash=include_content_hash, + ) + + def flow(self) -> Collection[tuple[dp.Tag, dp.Packet]]: + return self().flow() + + +class KernelNode(Node, WrappedKernel): + """ + A node in the pipeline that represents a kernel. + This node can be used to execute the kernel and process data streams. + """ + + def __init__( + self, + kernel: dp.Kernel, + input_streams: Collection[dp.Stream], + pipeline_store: ArrowDataStore, + pipeline_path_prefix: tuple[str, ...] = (), + **kwargs, + ) -> None: + super().__init__( + kernel=kernel, + input_streams=input_streams, + pipeline_store=pipeline_store, + pipeline_path_prefix=pipeline_path_prefix, + **kwargs, + ) + + def __repr__(self): + return f"KernelNode(kernel={self.kernel!r})" + + def __str__(self): + return f"KernelNode:{self.kernel!s}" + + def identity_structure(self, streams: Collection[dp.Stream] | None = None) -> Any: + """ + Return the identity structure of the node. + This is used to compute the invocation hash. + """ + # construct identity structure from the node's information and the + # contained kernel + if streams is not None and len(streams) > 0: + raise NotImplementedError( + "At this moment, Node does not yet support handling additional input streams." + ) + return self.kernel.identity_structure(self.input_streams) + + +class PodNode(Node, CachedPod): + def __init__( + self, + pod: dp.Pod, + input_streams: Collection[dp.Stream], + pipeline_store: ArrowDataStore, + result_store: ArrowDataStore | None = None, + record_path_prefix: tuple[str, ...] = (), + pipeline_path_prefix: tuple[str, ...] 
= (), + **kwargs, + ) -> None: + super().__init__( + pod=pod, + result_store=result_store, + record_path_prefix=record_path_prefix, + input_streams=input_streams, + pipeline_store=pipeline_store, + pipeline_path_prefix=pipeline_path_prefix, + **kwargs, + ) + self.pipeline_store = pipeline_store + # self.input_streams = tuple(input_streams) + + def __repr__(self): + return f"PodNode(pod={self.pod!r})" + + def __str__(self): + return f"PodNode:{self.pod!s}" + + def call( + self, + tag: dp.Tag, + packet: dp.Packet, + skip_record_check: bool = False, + skip_recording: bool = False, + overwrite_existing: bool = False, + ) -> tuple[dp.Tag, dp.Packet | None]: + tag, output_packet = super().call( + tag, + packet, + skip_record_check=skip_record_check, + skip_recording=skip_recording, + overwrite_existing=overwrite_existing, + ) + if output_packet is not None: + retrieved = ( + output_packet.get_meta_value(self.DATA_RETRIEVED_FLAG) is not None + ) + # add pipeline record if the output packet is not None + self.add_pipeline_record(tag, packet, retrieved=retrieved) + return tag, output_packet + + def add_pipeline_record( + self, tag: dp.Tag, input_packet: dp.Packet, retrieved: bool | None = None + ) -> None: + # combine dp.Tag with packet content hash to compute entry hash + tag_with_hash = tag.as_table().append_column( + self.PACKET_HASH_COLUMN, + pa.array([input_packet.content_hash()], type=pa.large_string()), + ) + entry_id = self.data_context.arrow_hasher.hash_table( + tag_with_hash, prefix_hasher_id=True + ) + + existing_record = self.pipeline_store.get_record_by_id( + self.pipeline_path, + entry_id, + ) + + if existing_record is not None: + # if the record already exists, return it + return + + # no record matching, so construct the full record + + input_packet_info = ( + input_packet.as_table( + include_source=True, + ) + .append_column( + f"{constants.META_PREFIX}input_packet{constants.CONTEXT_KEY}", + pa.array([input_packet.data_context_key], type=pa.large_string()), + ) + .append_column( + self.DATA_RETRIEVED_FLAG, + pa.array([retrieved], type=pa.bool_()), + ) + .drop(input_packet.keys()) + ) + + combined_record = arrow_utils.hstack_tables(tag_with_hash, input_packet_info) + + self.pipeline_store.add_record( + self.pipeline_path, + entry_id, + combined_record, + ignore_duplicates=False, + ) + + def _get_all_records(self) -> "pa.Table | None": + results = self.result_store.get_all_records( + self.record_path, record_id_column=self.PACKET_HASH_COLUMN + ) + + if self.pipeline_store is None: + raise ValueError( + "Pipeline store is not configured, cannot retrieve tag info" + ) + taginfo = self.pipeline_store.get_all_records( + self.record_path, + ) + + if results is None or taginfo is None: + return None + + tag_columns = [ + c + for c in taginfo.column_names + if not c.startswith(constants.META_PREFIX) + and not c.startswith(constants.SOURCE_PREFIX) + ] + + packet_columns = [ + c for c in results.column_names if c != self.PACKET_HASH_COLUMN + ] + + # TODO: do not hardcode the join keys + joined_info = taginfo.join( + results, + self.PACKET_HASH_COLUMN, + join_type="inner", + ) + + joined_info = joined_info.select([*tag_columns, *packet_columns]) + return joined_info + + def identity_structure(self, streams: Collection[dp.Stream] | None = None) -> Any: + """ + Return the identity structure of the node. + This is used to compute the invocation hash. 
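# --- Editor's note: illustrative sketch, not part of the patch ---------------
# add_pipeline_record() above derives an entry id from the tag plus the input
# packet's content hash and inserts a record only when no entry with that id
# already exists. A stripped-down hash-and-dedup sketch of that idea, with
# hashlib and a dict standing in for the Arrow hasher and pipeline store:

import hashlib
import json


def entry_id(tag: dict, packet_hash: str) -> str:
    payload = json.dumps({"tag": tag, "packet_hash": packet_hash}, sort_keys=True)
    return hashlib.sha256(payload.encode()).hexdigest()


pipeline_store: dict[str, dict] = {}


def add_record(tag: dict, packet_hash: str, record: dict) -> bool:
    eid = entry_id(tag, packet_hash)
    if eid in pipeline_store:   # record already present: nothing to do
        return False
    pipeline_store[eid] = {"tag": tag, "packet_hash": packet_hash, **record}
    return True


print(add_record({"id": 0}, "abc123", {"retrieved": False}))  # True  (inserted)
print(add_record({"id": 0}, "abc123", {"retrieved": True}))   # False (duplicate)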
+ """ + # construct identity structure from the node's information and the + # contained kernel + if streams is not None and len(streams) > 0: + raise NotImplementedError( + "At this moment, Node does not yet support handling additional input streams." + ) + return self.pod.identity_structure(self.input_streams) diff --git a/src/orcapod/pipeline/pipeline.py b/src/orcapod/pipeline/pipeline.py deleted file mode 100644 index f160f2b..0000000 --- a/src/orcapod/pipeline/pipeline.py +++ /dev/null @@ -1,730 +0,0 @@ -import json -import logging -import pickle -import sys -import time -from abc import ABC, abstractmethod -from pathlib import Path -from typing import Any, Protocol, runtime_checkable - -import networkx as nx -import pandas as pd - -from orcapod.core.base import Invocation, Kernel -from orcapod.hashing import hash_to_hex -from orcapod.core.tracker import GraphTracker - -logger = logging.getLogger(__name__) - - -class SerializationError(Exception): - """Raised when pipeline cannot be serialized""" - - pass - - -class Pipeline(GraphTracker): - """ - Enhanced pipeline that tracks operations and provides queryable views. - Replaces the old Tracker with better persistence and view capabilities. - """ - - def __init__(self, name: str | None = None): - super().__init__() - self.name = name or f"pipeline_{id(self)}" - self._view_registry: dict[str, "PipelineView"] = {} - self._cache_dir = Path(".pipeline_cache") / self.name - self._cache_dir.mkdir(parents=True, exist_ok=True) - - # Core Pipeline Operations - def save(self, path: Path | str) -> None: - """Save complete pipeline state - named functions only""" - path = Path(path) - - # Validate serializability first - self._validate_serializable() - - state = { - "name": self.name, - "invocation_lut": self.invocation_lut, - "metadata": { - "created_at": time.time(), - "python_version": sys.version_info[:2], - "orcabridge_version": "0.1.0", # You can make this dynamic - }, - } - - # Atomic write - temp_path = path.with_suffix(".tmp") - try: - with open(temp_path, "wb") as f: - pickle.dump(state, f, protocol=pickle.HIGHEST_PROTOCOL) - temp_path.replace(path) - logger.info(f"Pipeline '{self.name}' saved to {path}") - except Exception: - if temp_path.exists(): - temp_path.unlink() - raise - - @classmethod - def load(cls, path: Path | str) -> "Pipeline": - """Load complete pipeline state""" - path = Path(path) - - with open(path, "rb") as f: - state = pickle.load(f) - - pipeline = cls(state["name"]) - pipeline.invocation_lut = state["invocation_lut"] - - logger.info(f"Pipeline '{pipeline.name}' loaded from {path}") - return pipeline - - def _validate_serializable(self) -> None: - """Ensure pipeline contains only serializable operations""" - issues = [] - - for operation, invocations in self.invocation_lut.items(): - # Check for lambda functions - if hasattr(operation, "function"): - func = getattr(operation, "function", None) - if func and hasattr(func, "__name__") and func.__name__ == "": - issues.append(f"Lambda function in {operation.__class__.__name__}") - - # Test actual serializability - try: - pickle.dumps(operation) - except Exception as e: - issues.append(f"Non-serializable operation {operation}: {e}") - - if issues: - raise SerializationError( - "Pipeline contains non-serializable elements:\n" - + "\n".join(f" - {issue}" for issue in issues) - + "\n\nOnly named functions are supported for serialization." 
- ) - - # View Management - def as_view( - self, renderer: "ViewRenderer", view_id: str | None = None, **kwargs - ) -> "PipelineView": - """Get a view of this pipeline using the specified renderer""" - view_id = ( - view_id - or f"{renderer.__class__.__name__.lower()}_{len(self._view_registry)}" - ) - - if view_id not in self._view_registry: - self._view_registry[view_id] = renderer.create_view( - self, view_id=view_id, **kwargs - ) - return self._view_registry[view_id] - - def as_dataframe(self, view_id: str = "default", **kwargs) -> "PandasPipelineView": - """Convenience method for pandas DataFrame view""" - return self.as_view(PandasViewRenderer(), view_id=view_id, **kwargs) - - def as_graph(self) -> nx.DiGraph: - """Get the computation graph""" - return self.generate_graph() - - # Combined save/load with views - def save_with_views(self, base_path: Path | str) -> dict[str, Path]: - """Save pipeline and all its views together""" - base_path = Path(base_path) - base_path.mkdir(parents=True, exist_ok=True) - - saved_files = {} - - # Save pipeline itself - pipeline_path = base_path / "pipeline.pkl" - self.save(pipeline_path) - saved_files["pipeline"] = pipeline_path - - # Save all views - for view_id, view in self._view_registry.items(): - view_path = base_path / f"view_{view_id}.pkl" - view.save(view_path, include_pipeline=False) - saved_files[f"view_{view_id}"] = view_path - - # Save manifest - manifest = { - "pipeline_file": "pipeline.pkl", - "views": { - view_id: f"view_{view_id}.pkl" for view_id in self._view_registry.keys() - }, - "created_at": time.time(), - "pipeline_name": self.name, - } - - manifest_path = base_path / "manifest.json" - with open(manifest_path, "w") as f: - json.dump(manifest, f, indent=2) - saved_files["manifest"] = manifest_path - - return saved_files - - @classmethod - def load_with_views( - cls, base_path: Path | str - ) -> tuple["Pipeline", dict[str, "PipelineView"]]: - """Load pipeline and all its views""" - base_path = Path(base_path) - - # Load manifest - manifest_path = base_path / "manifest.json" - with open(manifest_path, "r") as f: - manifest = json.load(f) - - # Load pipeline - pipeline_path = base_path / manifest["pipeline_file"] - pipeline = cls.load(pipeline_path) - - # Load views with appropriate renderers - renderers = { - "PandasPipelineView": PandasViewRenderer(), - "DataJointPipelineView": DataJointViewRenderer(None), # Would need schema - } - - views = {} - for view_id, view_file in manifest["views"].items(): - view_path = base_path / view_file - - # Load view data to determine type - with open(view_path, "rb") as f: - view_data = pickle.load(f) - - # Find appropriate renderer - view_type = view_data.get("view_type", "PandasPipelineView") - if view_type in renderers and renderers[view_type].can_load_view(view_data): - # Load with appropriate view class - if view_type == "PandasPipelineView": - view = PandasPipelineView.load(view_path, pipeline) - else: - view = DataJointPipelineView.load(view_path, pipeline) - else: - # Default to pandas view - view = PandasPipelineView.load(view_path, pipeline) - - views[view_id] = view - pipeline._view_registry[view_id] = view - - return pipeline, views - - def get_stats(self) -> dict[str, Any]: - """Get pipeline statistics""" - total_operations = len(self.invocation_lut) - total_invocations = sum(len(invs) for invs in self.invocation_lut.values()) - - operation_types = {} - for operation in self.invocation_lut.keys(): - op_type = operation.__class__.__name__ - operation_types[op_type] = 
operation_types.get(op_type, 0) + 1 - - return { - "name": self.name, - "total_operations": total_operations, - "total_invocations": total_invocations, - "operation_types": operation_types, - "views": list(self._view_registry.keys()), - } - - -# View Renderer Protocol -@runtime_checkable -class ViewRenderer(Protocol): - """Protocol for all view renderers - uses structural typing""" - - def create_view( - self, pipeline: "Pipeline", view_id: str, **kwargs - ) -> "PipelineView": - """Create a view for the given pipeline""" - ... - - def can_load_view(self, view_data: dict[str, Any]) -> bool: - """Check if this renderer can load the given view data""" - ... - - -class PandasViewRenderer: - """Renderer for pandas DataFrame views""" - - def create_view( - self, pipeline: "Pipeline", view_id: str, **kwargs - ) -> "PandasPipelineView": - return PandasPipelineView(pipeline, view_id=view_id, **kwargs) - - def can_load_view(self, view_data: dict[str, Any]) -> bool: - return view_data.get("view_type") == "PandasPipelineView" - - -class DataJointViewRenderer: - """Renderer for DataJoint views""" - - def __init__(self, schema): - self.schema = schema - - def create_view( - self, pipeline: "Pipeline", view_id: str, **kwargs - ) -> "DataJointPipelineView": - return DataJointPipelineView(pipeline, self.schema, view_id=view_id, **kwargs) - - def can_load_view(self, view_data: dict[str, Any]) -> bool: - return view_data.get("view_type") == "DataJointPipelineView" - - -# Base class for all views -class PipelineView(ABC): - """Base class for all pipeline views""" - - def __init__(self, pipeline: Pipeline, view_id: str): - self.pipeline = pipeline - self.view_id = view_id - self._cache_dir = pipeline._cache_dir / "views" - self._cache_dir.mkdir(parents=True, exist_ok=True) - - @abstractmethod - def save(self, path: Path | str, include_pipeline: bool = True) -> None: - """Save the view""" - pass - - @classmethod - @abstractmethod - def load(cls, path: Path | str, pipeline: Pipeline | None = None) -> "PipelineView": - """Load the view""" - pass - - def _compute_pipeline_hash(self) -> str: - """Compute hash of current pipeline state for validation""" - pipeline_state = [] - for operation, invocations in self.pipeline.invocation_lut.items(): - for invocation in invocations: - pipeline_state.append(invocation.content_hash()) - return hash_to_hex(sorted(pipeline_state)) - - -# Pandas DataFrame-like view -class PandasPipelineView(PipelineView): - """ - Provides a pandas DataFrame-like interface to pipeline metadata. - Focuses on tag information for querying and filtering. 
- """ - - def __init__( - self, - pipeline: Pipeline, - view_id: str = "pandas_view", - max_records: int = 10000, - sample_size: int = 100, - ): - super().__init__(pipeline, view_id) - self.max_records = max_records - self.sample_size = sample_size - self._cached_data: pd.DataFrame | None = None - self._build_options = {"max_records": max_records, "sample_size": sample_size} - self._hash_to_data_map: dict[str, Any] = {} - - @property - def df(self) -> pd.DataFrame: - """Access the underlying DataFrame, building if necessary""" - if self._cached_data is None: - # Try to load from cache first - cache_path = self._cache_dir / f"{self.view_id}.pkl" - if cache_path.exists(): - try: - loaded_view = self.load(cache_path, self.pipeline) - if self._is_cache_valid(loaded_view): - self._cached_data = loaded_view._cached_data - self._hash_to_data_map = loaded_view._hash_to_data_map - logger.info(f"Loaded view '{self.view_id}' from cache") - return self._cached_data - except Exception as e: - logger.warning(f"Failed to load cached view: {e}") - - # Build from scratch - logger.info(f"Building view '{self.view_id}' from pipeline") - self._cached_data = self._build_metadata() - - # Auto-save after building - try: - self.save(cache_path, include_pipeline=False) - except Exception as e: - logger.warning(f"Failed to cache view: {e}") - - return self._cached_data - - def _build_metadata(self) -> pd.DataFrame: - """Build the metadata DataFrame from pipeline operations""" - metadata_records = [] - total_records = 0 - - for operation, invocations in self.pipeline.invocation_lut.items(): - if total_records >= self.max_records: - logger.warning(f"Hit max_records limit ({self.max_records})") - break - - for invocation in invocations: - try: - # Get sample of outputs, not all - records = self._extract_metadata_from_invocation( - invocation, operation - ) - for record in records: - metadata_records.append(record) - total_records += 1 - if total_records >= self.max_records: - break - - if total_records >= self.max_records: - break - - except Exception as e: - logger.warning(f"Skipping {operation.__class__.__name__}: {e}") - # Create placeholder record - placeholder = self._create_placeholder_record(invocation, operation) - metadata_records.append(placeholder) - total_records += 1 - - if not metadata_records: - # Return empty DataFrame with basic structure - return pd.DataFrame( - columns=[ - "operation_name", - "operation_hash", - "invocation_id", - "created_at", - "packet_keys", - ] - ) - - return pd.DataFrame(metadata_records) - - def _extract_metadata_from_invocation( - self, invocation: Invocation, operation: Kernel - ) -> list[dict[str, Any]]: - """Extract metadata records from a single invocation""" - records = [] - - # Try to get sample outputs from the invocation - try: - # This is tricky - we need to reconstruct the output stream - # For now, we'll create a basic record from what we know - base_record = { - "operation_name": operation.label or operation.__class__.__name__, - "operation_hash": invocation.content_hash(), - "invocation_id": hash(invocation), - "created_at": time.time(), - "operation_type": operation.__class__.__name__, - } - - # Try to get tag and packet info from the operation - try: - tag_keys, packet_keys = invocation.keys() - base_record.update( - { - "tag_keys": list(tag_keys) if tag_keys else [], - "packet_keys": list(packet_keys) if packet_keys else [], - } - ) - except Exception: - base_record.update( - { - "tag_keys": [], - "packet_keys": [], - } - ) - - records.append(base_record) - - 
except Exception as e: - logger.debug(f"Could not extract detailed metadata from {operation}: {e}") - records.append(self._create_placeholder_record(invocation, operation)) - - return records - - def _create_placeholder_record( - self, invocation: Invocation, operation: Kernel - ) -> dict[str, Any]: - """Create a placeholder record when extraction fails""" - return { - "operation_name": operation.label or operation.__class__.__name__, - "operation_hash": invocation.content_hash(), - "invocation_id": hash(invocation), - "created_at": time.time(), - "operation_type": operation.__class__.__name__, - "tag_keys": [], - "packet_keys": [], - "is_placeholder": True, - } - - # DataFrame-like interface - def __getitem__(self, condition) -> "FilteredPipelineView": - """Enable pandas-like filtering: view[condition]""" - df = self.df - if isinstance(condition, pd.Series): - filtered_df = df[condition] - elif callable(condition): - filtered_df = df[condition(df)] - else: - filtered_df = df[condition] - - return FilteredPipelineView(self.pipeline, filtered_df, self._hash_to_data_map) - - def query(self, expr: str) -> "FilteredPipelineView": - """SQL-like querying: view.query('operation_name == "MyOperation"')""" - df = self.df - filtered_df = df.query(expr) - return FilteredPipelineView(self.pipeline, filtered_df, self._hash_to_data_map) - - def groupby(self, *args, **kwargs) -> "GroupedPipelineView": - """Group operations similar to pandas groupby""" - df = self.df - grouped = df.groupby(*args, **kwargs) - return GroupedPipelineView(self.pipeline, grouped, self._hash_to_data_map) - - def head(self, n: int = 5) -> pd.DataFrame: - """Return first n rows""" - return self.df.head(n) - - def info(self) -> None: - """Display DataFrame info""" - return self.df.info() - - def describe(self) -> pd.DataFrame: - """Generate descriptive statistics""" - return self.df.describe() - - # Persistence methods - def save(self, path: Path | str, include_pipeline: bool = True) -> None: - """Save view, optionally with complete pipeline state""" - path = Path(path) - - # Build the view data if not cached - df = self.df - - view_data = { - "view_id": self.view_id, - "view_type": self.__class__.__name__, - "dataframe": df, - "build_options": self._build_options, - "hash_to_data_map": self._hash_to_data_map, - "created_at": time.time(), - "pipeline_hash": self._compute_pipeline_hash(), - } - - if include_pipeline: - view_data["pipeline_state"] = { - "name": self.pipeline.name, - "invocation_lut": self.pipeline.invocation_lut, - } - view_data["has_pipeline"] = True - else: - view_data["pipeline_name"] = self.pipeline.name - view_data["has_pipeline"] = False - - with open(path, "wb") as f: - pickle.dump(view_data, f, protocol=pickle.HIGHEST_PROTOCOL) - - @classmethod - def load( - cls, path: Path | str, pipeline: Pipeline | None = None - ) -> "PandasPipelineView": - """Load view, reconstructing pipeline if needed""" - with open(path, "rb") as f: - view_data = pickle.load(f) - - # Handle pipeline reconstruction - if view_data["has_pipeline"]: - pipeline = Pipeline(view_data["pipeline_state"]["name"]) - pipeline.invocation_lut = view_data["pipeline_state"]["invocation_lut"] - elif pipeline is None: - raise ValueError( - "View was saved without pipeline state. " - "You must provide a pipeline parameter." 
- ) - - # Reconstruct view - build_options = view_data.get("build_options", {}) - view = cls( - pipeline, - view_id=view_data["view_id"], - max_records=build_options.get("max_records", 10000), - sample_size=build_options.get("sample_size", 100), - ) - view._cached_data = view_data["dataframe"] - view._hash_to_data_map = view_data.get("hash_to_data_map", {}) - - return view - - def _is_cache_valid(self, cached_view: "PandasPipelineView") -> bool: - """Check if cached view is still valid""" - try: - cached_hash = getattr(cached_view, "_pipeline_hash", None) - current_hash = self._compute_pipeline_hash() - return cached_hash == current_hash - except Exception: - return False - - def invalidate(self) -> None: - """Force re-rendering on next access""" - self._cached_data = None - cache_path = self._cache_dir / f"{self.view_id}.pkl" - if cache_path.exists(): - cache_path.unlink() - - -class FilteredPipelineView: - """Represents a filtered subset of pipeline metadata""" - - def __init__( - self, pipeline: Pipeline, filtered_df: pd.DataFrame, data_map: dict[str, Any] - ): - self.pipeline = pipeline - self.df = filtered_df - self._data_map = data_map - - def __getitem__(self, condition): - """Further filtering""" - further_filtered = self.df[condition] - return FilteredPipelineView(self.pipeline, further_filtered, self._data_map) - - def query(self, expr: str): - """Apply additional query""" - further_filtered = self.df.query(expr) - return FilteredPipelineView(self.pipeline, further_filtered, self._data_map) - - def to_pandas(self) -> pd.DataFrame: - """Convert to regular pandas DataFrame""" - return self.df.copy() - - def head(self, n: int = 5) -> pd.DataFrame: - """Return first n rows""" - return self.df.head(n) - - def __len__(self) -> int: - return len(self.df) - - def __repr__(self) -> str: - return f"FilteredPipelineView({len(self.df)} records)" - - -class GroupedPipelineView: - """Represents grouped pipeline metadata""" - - def __init__(self, pipeline: Pipeline, grouped_df, data_map: dict[str, Any]): - self.pipeline = pipeline - self.grouped = grouped_df - self._data_map = data_map - - def apply(self, func): - """Apply function to each group""" - return self.grouped.apply(func) - - def agg(self, *args, **kwargs): - """Aggregate groups""" - return self.grouped.agg(*args, **kwargs) - - def size(self): - """Get group sizes""" - return self.grouped.size() - - def get_group(self, name): - """Get specific group""" - group_df = self.grouped.get_group(name) - return FilteredPipelineView(self.pipeline, group_df, self._data_map) - - -# Basic DataJoint View (simplified implementation) -class DataJointPipelineView(PipelineView): - """ - Basic DataJoint view - creates tables for pipeline operations - This is a simplified version - you can expand based on your existing DJ code - """ - - def __init__(self, pipeline: Pipeline, schema, view_id: str = "dj_view"): - super().__init__(pipeline, view_id) - self.schema = schema - self._tables = {} - - def save(self, path: Path | str, include_pipeline: bool = True) -> None: - """Save DataJoint view metadata""" - view_data = { - "view_id": self.view_id, - "view_type": self.__class__.__name__, - "schema_database": self.schema.database, - "table_names": list(self._tables.keys()), - "created_at": time.time(), - } - - if include_pipeline: - view_data["pipeline_state"] = { - "name": self.pipeline.name, - "invocation_lut": self.pipeline.invocation_lut, - } - view_data["has_pipeline"] = True - - with open(path, "wb") as f: - pickle.dump(view_data, f) - - @classmethod - 
def load( - cls, path: Path | str, pipeline: Pipeline | None = None - ) -> "DataJointPipelineView": - """Load DataJoint view""" - with open(path, "rb") as f: - view_data = pickle.load(f) - - # This would need actual DataJoint schema reconstruction - # For now, return a basic instance - if pipeline is None: - raise ValueError("Pipeline required for DataJoint view loading") - - # You'd need to reconstruct the schema here - view = cls(pipeline, None, view_id=view_data["view_id"]) # schema=None for now - return view - - def generate_tables(self): - """Generate DataJoint tables from pipeline - placeholder implementation""" - # This would use your existing DataJoint generation logic - # from your dj/tracker.py file - pass - - -# Utility functions -def validate_pipeline_serializability(pipeline: Pipeline) -> None: - """Helper to check if pipeline can be saved""" - try: - pipeline._validate_serializable() - print("✅ Pipeline is ready for serialization") - - # Additional performance warnings - stats = pipeline.get_stats() - if stats["total_invocations"] > 1000: - print( - f"⚠️ Large pipeline ({stats['total_invocations']} invocations) - views may be slow to build" - ) - - except SerializationError as e: - print("❌ Pipeline cannot be serialized:") - print(str(e)) - print("\n💡 Convert lambda functions to named functions:") - print(" lambda x: x > 0.8 → def filter_func(x): return x > 0.8") - - -def create_example_pipeline() -> Pipeline: - """Create an example pipeline for testing""" - from orcapod import GlobSource, function_pod - - @function_pod - def example_function(input_file): - return f"processed_{input_file}" - - pipeline = Pipeline("example") - - with pipeline: - # This would need actual operations to be meaningful - # source = GlobSource('data', './test_data', '*.txt')() - # results = source >> example_function - pass - - return pipeline diff --git a/src/orcapod/pod/__init__.py b/src/orcapod/pod/__init__.py deleted file mode 100644 index 8567c2a..0000000 --- a/src/orcapod/pod/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -from .core import Pod, FunctionPod, function_pod, TypedFunctionPod, typed_function_pod - -__all__ = [ - "Pod", - "FunctionPod", - "function_pod", - "TypedFunctionPod", - "typed_function_pod", -] diff --git a/src/orcapod/pod/core.py b/src/orcapod/pod/core.py deleted file mode 100644 index a82944f..0000000 --- a/src/orcapod/pod/core.py +++ /dev/null @@ -1,868 +0,0 @@ -import functools -import logging -import pickle -import warnings -from abc import abstractmethod -import pyarrow as pa -import sys -from collections.abc import Callable, Collection, Iterable, Iterator, Sequence -from typing import ( - Any, - Literal, -) - -from orcapod.types.registry import PacketConverter - -from orcapod.core.base import Kernel -from orcapod.hashing import ( - ObjectHasher, - ArrowHasher, - FunctionInfoExtractor, - get_function_signature, - hash_function, - get_default_object_hasher, - get_default_arrow_hasher, -) -from orcapod.core.operators import Join -from orcapod.store import DataStore, ArrowDataStore, NoOpDataStore -from orcapod.core.streams import SyncStream, SyncStreamFromGenerator -from orcapod.types import Packet, PathSet, PodFunction, Tag, TypeSpec - -from orcapod.types.default import default_registry -from orcapod.types.inference import ( - extract_function_data_types, - verify_against_typespec, - check_typespec_compatibility, -) -from orcapod.types.registry import is_packet_supported -import polars as pl - -logger = logging.getLogger(__name__) - - -def function_pod( - output_keys: 
Collection[str] | None = None, - function_name: str | None = None, - data_store: DataStore | None = None, - store_name: str | None = None, - function_hash_mode: Literal["signature", "content", "name", "custom"] = "name", - custom_hash: int | None = None, - force_computation: bool = False, - skip_memoization: bool = False, - error_handling: Literal["raise", "ignore", "warn"] = "raise", - **kwargs, -) -> Callable[..., "FunctionPod"]: - """ - Decorator that wraps a function in a FunctionPod instance. - - Args: - output_keys: Keys for the function output - force_computation: Whether to force computation - skip_memoization: Whether to skip memoization - - Returns: - FunctionPod instance wrapping the decorated function - """ - - def decorator(func) -> FunctionPod: - if func.__name__ == "": - raise ValueError("Lambda functions cannot be used with function_pod") - - if not hasattr(func, "__module__") or func.__module__ is None: - raise ValueError( - f"Function {func.__name__} must be defined at module level" - ) - - # Store the original function in the module for pickling purposes - # and make sure to change the name of the function - module = sys.modules[func.__module__] - base_function_name = func.__name__ - new_function_name = f"_original_{func.__name__}" - setattr(module, new_function_name, func) - # rename the function to be consistent and make it pickleable - setattr(func, "__name__", new_function_name) - setattr(func, "__qualname__", new_function_name) - - # Create the FunctionPod - pod = FunctionPod( - function=func, - output_keys=output_keys, - function_name=function_name or base_function_name, - data_store=data_store, - store_name=store_name, - function_hash_mode=function_hash_mode, - custom_hash=custom_hash, - force_computation=force_computation, - skip_memoization=skip_memoization, - error_handling=error_handling, - **kwargs, - ) - - return pod - - return decorator - - -class Pod(Kernel): - """ - An (abstract) base class for all pods. A pod can be seen as a special type of operation that - only operates on the packet content without reading tags. Consequently, no operation - of Pod can dependent on the tags of the packets. This is a design choice to ensure that - the pods act as pure functions which is a necessary condition to guarantee reproducibility. - """ - - def __init__( - self, error_handling: Literal["raise", "ignore", "warn"] = "raise", **kwargs - ): - super().__init__(**kwargs) - self.error_handling = error_handling - - def process_stream(self, *streams: SyncStream) -> list[SyncStream]: - """ - Prepare the incoming streams for execution in the pod. This default implementation - joins all the streams together and raises and error if no streams are provided. - """ - # if multiple streams are provided, join them - # otherwise, return as is - combined_streams = list(streams) - if len(streams) > 1: - stream = streams[0] - for next_stream in streams[1:]: - stream = Join()(stream, next_stream) - combined_streams = [stream] - return combined_streams - - def __call__(self, *streams: SyncStream, **kwargs) -> SyncStream: - stream = self.process_stream(*streams) - return super().__call__(*stream, **kwargs) - - def call(self, tag: Tag, packet: Packet) -> tuple[Tag, Packet]: ... 
- - def forward(self, *streams: SyncStream) -> SyncStream: - # if multiple streams are provided, join them - if len(streams) > 1: - raise ValueError("Multiple streams should be joined before calling forward") - if len(streams) == 0: - raise ValueError("No streams provided to forward") - stream = streams[0] - - def generator() -> Iterator[tuple[Tag, Packet]]: - n_computed = 0 - for tag, packet in stream: - try: - tag, output_packet = self.call(tag, packet) - n_computed += 1 - logger.info(f"Computed item {n_computed}") - yield tag, output_packet - - except Exception as e: - logger.error(f"Error processing packet {packet}: {e}") - if self.error_handling == "raise": - raise e - elif self.error_handling == "ignore": - continue - elif self.error_handling == "warn": - warnings.warn(f"Error processing packet {packet}: {e}") - continue - - return SyncStreamFromGenerator(generator) - - -class FunctionPod(Pod): - """ - A pod that wraps a function and allows it to be used as an operation in a stream. - This pod can be used to apply a function to the packets in a stream, with optional memoization - and caching of results. It can also handle multiple output keys and error handling. - The function should accept keyword arguments that correspond to the keys in the packets. - The output of the function should be a path or a collection of paths that correspond to the output keys.""" - - def __init__( - self, - function: PodFunction, - output_keys: Collection[str] | None = None, - function_name=None, - data_store: DataStore | None = None, - store_name: str | None = None, - function_hash_mode: Literal["signature", "content", "name", "custom"] = "name", - custom_hash: int | None = None, - label: str | None = None, - force_computation: bool = False, - skip_memoization_lookup: bool = False, - skip_memoization: bool = False, - error_handling: Literal["raise", "ignore", "warn"] = "raise", - _hash_function_kwargs: dict | None = None, - **kwargs, - ) -> None: - super().__init__(label=label, **kwargs) - self.function = function - self.output_keys = output_keys or [] - if function_name is None: - if hasattr(self.function, "__name__"): - function_name = getattr(self.function, "__name__") - else: - raise ValueError( - "function_name must be provided if function has no __name__ attribute" - ) - - self.function_name = function_name - self.data_store = data_store if data_store is not None else NoOpDataStore() - self.store_name = store_name or function_name - self.function_hash_mode = function_hash_mode - self.custom_hash = custom_hash - self.force_computation = force_computation - self.skip_memoization_lookup = skip_memoization_lookup - self.skip_memoization = skip_memoization - self.error_handling = error_handling - self._hash_function_kwargs = _hash_function_kwargs - - def __repr__(self) -> str: - func_sig = get_function_signature(self.function) - return f"FunctionPod:{func_sig} ⇒ {self.output_keys}" - - def keys( - self, *streams: SyncStream, trigger_run: bool = False - ) -> tuple[Collection[str] | None, Collection[str] | None]: - stream = self.process_stream(*streams) - tag_keys, _ = stream[0].keys(trigger_run=trigger_run) - return tag_keys, tuple(self.output_keys) - - def is_memoized(self, packet: Packet) -> bool: - return self.retrieve_memoized(packet) is not None - - def retrieve_memoized(self, packet: Packet) -> Packet | None: - """ - Retrieve a memoized packet from the data store. - Returns None if no memoized packet is found. 
- """ - return self.data_store.retrieve_memoized( - self.store_name, - self.content_hash(char_count=16), - packet, - ) - - def memoize( - self, - packet: Packet, - output_packet: Packet, - ) -> Packet: - """ - Memoize the output packet in the data store. - Returns the memoized packet. - """ - return self.data_store.memoize( - self.store_name, - self.content_hash(char_count=16), # identity of this function pod - packet, - output_packet, - ) - - def forward(self, *streams: SyncStream) -> SyncStream: - # if multiple streams are provided, join them - if len(streams) > 1: - raise ValueError("Multiple streams should be joined before calling forward") - if len(streams) == 0: - raise ValueError("No streams provided to forward") - stream = streams[0] - - def generator() -> Iterator[tuple[Tag, Packet]]: - n_computed = 0 - for tag, packet in stream: - output_values: list["PathSet"] = [] - try: - if not self.skip_memoization_lookup: - memoized_packet = self.retrieve_memoized(packet) - else: - memoized_packet = None - if not self.force_computation and memoized_packet is not None: - logger.info("Memoized packet found, skipping computation") - yield tag, memoized_packet - continue - values = self.function(**packet) - - if len(self.output_keys) == 0: - output_values = [] - elif len(self.output_keys) == 1: - output_values = [values] # type: ignore - elif isinstance(values, Iterable): - output_values = list(values) # type: ignore - elif len(self.output_keys) > 1: - raise ValueError( - "Values returned by function must be a pathlike or a sequence of pathlikes" - ) - - if len(output_values) != len(self.output_keys): - raise ValueError( - f"Number of output keys {len(self.output_keys)}:{self.output_keys} does not match number of values returned by function {len(output_values)}" - ) - except Exception as e: - logger.error(f"Error processing packet {packet}: {e}") - if self.error_handling == "raise": - raise e - elif self.error_handling == "ignore": - continue - elif self.error_handling == "warn": - warnings.warn(f"Error processing packet {packet}: {e}") - continue - - output_packet: Packet = { - k: v for k, v in zip(self.output_keys, output_values) - } - - if not self.skip_memoization: - # output packet may be modified by the memoization process - # e.g. 
if the output is a file, the path may be changed - output_packet = self.memoize(packet, output_packet) # type: ignore - - n_computed += 1 - logger.info(f"Computed item {n_computed}") - yield tag, output_packet - - return SyncStreamFromGenerator(generator) - - def identity_structure(self, *streams) -> Any: - content_kwargs = self._hash_function_kwargs - if self.function_hash_mode == "content": - if content_kwargs is None: - content_kwargs = { - "include_name": False, - "include_module": False, - "include_declaration": False, - } - function_hash_value = hash_function( - self.function, - name_override=self.function_name, - function_hash_mode="content", - content_kwargs=content_kwargs, - ) - elif self.function_hash_mode == "signature": - function_hash_value = hash_function( - self.function, - name_override=self.function_name, - function_hash_mode="signature", - content_kwargs=content_kwargs, - ) - elif self.function_hash_mode == "name": - function_hash_value = hash_function( - self.function, - name_override=self.function_name, - function_hash_mode="name", - content_kwargs=content_kwargs, - ) - elif self.function_hash_mode == "custom": - if self.custom_hash is None: - raise ValueError("Custom hash function not provided") - function_hash_value = self.custom_hash - else: - raise ValueError( - f"Unknown function hash mode: {self.function_hash_mode}. " - "Must be one of 'content', 'signature', 'name', or 'custom'." - ) - - return ( - self.__class__.__name__, - function_hash_value, - tuple(self.output_keys), - ) + tuple(streams) - - -def typed_function_pod( - output_keys: str | Collection[str] | None = None, - function_name: str | None = None, - label: str | None = None, - result_store: ArrowDataStore | None = None, - tag_store: ArrowDataStore | None = None, - object_hasher: ObjectHasher | None = None, - arrow_hasher: ArrowHasher | None = None, - **kwargs, -) -> Callable[..., "TypedFunctionPod | CachedFunctionPod"]: - """ - Decorator that wraps a function in a FunctionPod instance. - - Args: - output_keys: Keys for the function output(s) - function_name: Name of the function pod; if None, defaults to the function name - **kwargs: Additional keyword arguments to pass to the FunctionPod constructor. Please refer to the FunctionPod documentation for details. 
- - Returns: - FunctionPod instance wrapping the decorated function - """ - - def decorator(func) -> TypedFunctionPod | CachedFunctionPod: - if func.__name__ == "": - raise ValueError("Lambda functions cannot be used with function_pod") - - if not hasattr(func, "__module__") or func.__module__ is None: - raise ValueError( - f"Function {func.__name__} must be defined at module level" - ) - - # Store the original function in the module for pickling purposes - # and make sure to change the name of the function - module = sys.modules[func.__module__] - base_function_name = func.__name__ - new_function_name = f"_original_{func.__name__}" - setattr(module, new_function_name, func) - # rename the function to be consistent and make it pickleable - setattr(func, "__name__", new_function_name) - setattr(func, "__qualname__", new_function_name) - - # Create a simple typed function pod - pod = TypedFunctionPod( - function=func, - output_keys=output_keys, - function_name=function_name or base_function_name, - label=label, - **kwargs, - ) - - if result_store is not None: - pod = CachedFunctionPod( - function_pod=pod, - object_hasher=object_hasher - if object_hasher is not None - else get_default_object_hasher(), - arrow_hasher=arrow_hasher - if arrow_hasher is not None - else get_default_arrow_hasher(), - result_store=result_store, - tag_store=tag_store, - ) - - return pod - - return decorator - - -class TypedFunctionPod(Pod): - """ - A type-aware pod that wraps a function and provides automatic type validation and inference. - - This pod extends the base Pod functionality by automatically extracting and validating - type information from function signatures and user-provided specifications. It ensures - type safety by verifying that both input and output types are supported by the - configured type registry before execution. - - The TypedFunctionPod analyzes the wrapped function's signature to determine: - - Parameter types (from annotations or user-provided input_types) - - Return value types (from annotations or user-provided output_types) - - Type compatibility with the packet type registry - - Key Features: - - Automatic type extraction from function annotations - - Type override support via input_types and output_types parameters - - Registry-based type validation ensuring data compatibility - - Memoization support with type-aware caching - - Multiple output key handling with proper type mapping - - Comprehensive error handling for type mismatches - - Type Resolution Priority: - 1. User-provided input_types/output_types override function annotations - 2. Function parameter annotations are used when available - 3. Function return annotations are parsed for output type inference - 4. Error raised if types cannot be determined or are unsupported - - Args: - function: The function to wrap. Must accept keyword arguments corresponding - to packet keys and return values compatible with output_keys. - output_keys: Collection of string keys for the function outputs. For functions - returning a single value, provide a single key. For multiple returns - (tuple/list), provide keys matching the number of return items. - function_name: Optional name for the function. Defaults to function.__name__. - input_types: Optional mapping of parameter names to their types. Overrides - function annotations for specified parameters. - output_types: Optional type specification for return values. 
Can be: - - A dict mapping output keys to types (TypeSpec) - - A sequence of types mapped to output_keys in order - These override inferred types from function return annotations. - data_store: DataStore instance for memoization. Defaults to NoOpDataStore. - function_hasher: Hasher function for creating function identity hashes. - Required parameter - no default implementation available. - label: Optional label for the pod instance. - skip_memoization_lookup: If True, skips checking for memoized results. - skip_memoization: If True, disables memoization entirely. - error_handling: How to handle execution errors: - - "raise": Raise exceptions (default) - - "ignore": Skip failed packets silently - - "warn": Issue warnings and continue - packet_type_registry: Registry for validating packet types. Defaults to - the default registry if None. - **kwargs: Additional arguments passed to the parent Pod class and above. - - Raises: - ValueError: When: - - function_name cannot be determined and is not provided - - Input types are not supported by the registry - - Output types are not supported by the registry - - Type extraction fails due to missing annotations/specifications - NotImplementedError: When function_hasher is None (required parameter). - - Examples: - Basic usage with annotated function: - - >>> def process_data(text: str, count: int) -> tuple[str, int]: - ... return text.upper(), count * 2 - >>> - >>> pod = TypedFunctionPod( - ... function=process_data, - ... output_keys=['upper_text', 'doubled_count'], - ... function_hasher=my_hasher - ... ) - - Override types for legacy function: - - >>> def legacy_func(x, y): # No annotations - ... return x + y - >>> - >>> pod = TypedFunctionPod( - ... function=legacy_func, - ... output_keys=['sum'], - ... input_types={'x': int, 'y': int}, - ... output_types={'sum': int}, - ... function_hasher=my_hasher - ... ) - - Multiple outputs with sequence override: - - >>> def analyze(data: list) -> tuple[int, float, str]: - ... return len(data), sum(data), str(data) - >>> - >>> pod = TypedFunctionPod( - ... function=analyze, - ... output_keys=['count', 'total', 'repr'], - ... output_types=[int, float, str], # Override with sequence - ... function_hasher=my_hasher - ... ) - - Attributes: - function: The wrapped function. - output_keys: List of output key names. - function_name: Name identifier for the function. - function_input_types: Resolved input type specification. - function_output_types: Resolved output type specification. - registry: Type registry for validation. - data_store: DataStore instance for memoization. - function_hasher: Function hasher for identity computation. - skip_memoization_lookup: Whether to skip memoization lookups. - skip_memoization: Whether to disable memoization entirely. - error_handling: Error handling strategy. - - Note: - The TypedFunctionPod requires a function_hasher to be provided as there - is no default implementation. This hasher is used to create stable - identity hashes for memoization and caching purposes. - - Type validation occurs during initialization, ensuring that any type - incompatibilities are caught early rather than during stream processing. 
- """ - - def __init__( - self, - function: Callable[..., Any], - output_keys: str | Collection[str] | None = None, - function_name=None, - input_types: TypeSpec | None = None, - output_types: TypeSpec | Sequence[type] | None = None, - label: str | None = None, - packet_type_registry=None, - function_info_extractor: FunctionInfoExtractor | None = None, - **kwargs, - ) -> None: - super().__init__(label=label, **kwargs) - self.function = function - if output_keys is None: - output_keys = [] - if isinstance(output_keys, str): - output_keys = [output_keys] - self.output_keys = output_keys - if function_name is None: - if hasattr(self.function, "__name__"): - function_name = getattr(self.function, "__name__") - else: - raise ValueError( - "function_name must be provided if function has no __name__ attribute" - ) - self.function_name = function_name - - if packet_type_registry is None: - packet_type_registry = default_registry - - self.registry = packet_type_registry - self.function_info_extractor = function_info_extractor - - # extract input and output types from the function signature - function_input_types, function_output_types = extract_function_data_types( - self.function, - self.output_keys, - input_types=input_types, - output_types=output_types, - ) - - self.function_input_types = function_input_types - self.function_output_types = function_output_types - - # TODO: include explicit check of support during PacketConverter creation - self.input_converter = PacketConverter(self.function_input_types, self.registry) - self.output_converter = PacketConverter( - self.function_output_types, self.registry - ) - - # TODO: prepare a separate str and repr methods - def __repr__(self) -> str: - func_sig = get_function_signature(self.function) - return f"FunctionPod:{func_sig} ⇒ {self.output_keys}" - - def call(self, tag, packet) -> tuple[Tag, Packet]: - output_values: list["PathSet"] = [] - - values = self.function(**packet) - - if len(self.output_keys) == 0: - output_values = [] - elif len(self.output_keys) == 1: - output_values = [values] # type: ignore - elif isinstance(values, Iterable): - output_values = list(values) # type: ignore - elif len(self.output_keys) > 1: - raise ValueError( - "Values returned by function must be a pathlike or a sequence of pathlikes" - ) - - if len(output_values) != len(self.output_keys): - raise ValueError( - f"Number of output keys {len(self.output_keys)}:{self.output_keys} does not match number of values returned by function {len(output_values)}" - ) - - output_packet: Packet = {k: v for k, v in zip(self.output_keys, output_values)} - return tag, output_packet - - def identity_structure(self, *streams) -> Any: - # construct identity structure for the function - # if function_info_extractor is available, use that but substitute the function_name - if self.function_info_extractor is not None: - function_info = self.function_info_extractor.extract_function_info( - self.function, - function_name=self.function_name, - input_types=self.function_input_types, - output_types=self.function_output_types, - ) - else: - # use basic information only - function_info = { - "name": self.function_name, - "input_types": self.function_input_types, - "output_types": self.function_output_types, - } - function_info["output_keys"] = tuple(self.output_keys) - - return ( - self.__class__.__name__, - function_info, - ) + tuple(streams) - - def keys( - self, *streams: SyncStream, trigger_run: bool = False - ) -> tuple[Collection[str] | None, Collection[str] | None]: - stream = 
self.process_stream(*streams) - tag_keys, _ = stream[0].keys(trigger_run=trigger_run) - return tag_keys, tuple(self.output_keys) - - -class CachedFunctionPod(Pod): - def __init__( - self, - function_pod: TypedFunctionPod, - object_hasher: ObjectHasher, - arrow_hasher: ArrowHasher, - result_store: ArrowDataStore, - tag_store: ArrowDataStore | None = None, - label: str | None = None, - skip_memoization_lookup: bool = False, - skip_memoization: bool = False, - skip_tag_record: bool = False, - error_handling: Literal["raise", "ignore", "warn"] = "raise", - **kwargs, - ) -> None: - super().__init__(label=label, error_handling=error_handling, **kwargs) - self.function_pod = function_pod - - self.object_hasher = object_hasher - self.arrow_hasher = arrow_hasher - self.result_store = result_store - self.tag_store = tag_store - - self.skip_memoization_lookup = skip_memoization_lookup - self.skip_memoization = skip_memoization - self.skip_tag_record = skip_tag_record - - # TODO: consider making this dynamic - self.function_pod_hash = self.object_hasher.hash_to_hex(self.function_pod) - - def get_packet_key(self, packet: Packet) -> str: - return self.arrow_hasher.hash_table( - self.function_pod.input_converter.to_arrow_table(packet) - ) - - # TODO: prepare a separate str and repr methods - def __repr__(self) -> str: - return f"Cached:{self.function_pod}" - - def keys( - self, *streams: SyncStream, trigger_run: bool = False - ) -> tuple[Collection[str] | None, Collection[str] | None]: - return self.function_pod.keys(*streams, trigger_run=trigger_run) - - def is_memoized(self, packet: Packet) -> bool: - return self.retrieve_memoized(packet) is not None - - def add_tag_record(self, tag: Tag, packet: Packet) -> Tag: - """ - Record the tag for the packet in the record store. - This is used to keep track of the tags associated with memoized packets. - """ - - return self._add_tag_record_with_packet_key(tag, self.get_packet_key(packet)) - - def _add_tag_record_with_packet_key(self, tag: Tag, packet_key: str) -> Tag: - if self.tag_store is None: - raise ValueError("Recording of tag requires tag_store but none provided") - - tag = dict(tag) # ensure we don't modify the original tag - tag["__packet_key"] = packet_key - - # convert tag to arrow table - table = pa.Table.from_pylist([tag]) - - entry_hash = self.arrow_hasher.hash_table(table) - - # TODO: add error handling - # check if record already exists: - retrieved_table = self.tag_store.get_record( - self.function_pod.function_name, self.function_pod_hash, entry_hash - ) - if retrieved_table is None: - self.tag_store.add_record( - self.function_pod.function_name, - self.function_pod_hash, - entry_hash, - table, - ) - - return tag - - def retrieve_memoized(self, packet: Packet) -> Packet | None: - """ - Retrieve a memoized packet from the data store. - Returns None if no memoized packet is found. - """ - logger.info("Retrieving memoized packet") - return self._retrieve_memoized_by_hash(self.get_packet_key(packet)) - - def _retrieve_memoized_by_hash(self, packet_hash: str) -> Packet | None: - """ - Retrieve a memoized result packet from the data store, looking up by hash - Returns None if no memoized packet is found. 
- """ - logger.info(f"Retrieving memoized packet with hash {packet_hash}") - arrow_table = self.result_store.get_record( - self.function_pod.function_name, - self.function_pod_hash, - packet_hash, - ) - if arrow_table is None: - return None - packets = self.function_pod.output_converter.from_arrow_table(arrow_table) - # since memoizing single packet, it should only contain one packet - assert len(packets) == 1, ( - f"Memoizing single packet return {len(packets)} packets!" - ) - return packets[0] - - def memoize( - self, - packet: Packet, - output_packet: Packet, - ) -> Packet: - """ - Memoize the output packet in the data store. - Returns the memoized packet. - """ - logger.info("Memoizing packet") - return self._memoize_by_hash(self.get_packet_key(packet), output_packet) - - def _memoize_by_hash(self, packet_hash: str, output_packet: Packet) -> Packet: - """ - Memoize the output packet in the data store, looking up by hash. - Returns the memoized packet. - """ - logger.info(f"Memoizing packet with hash {packet_hash}") - packets = self.function_pod.output_converter.from_arrow_table( - self.result_store.add_record( - self.function_pod.function_name, - self.function_pod_hash, - packet_hash, - self.function_pod.output_converter.to_arrow_table(output_packet), - ) - ) - # since memoizing single packet, it should only contain one packet - assert len(packets) == 1, ( - f"Memoizing single packet return {len(packets)} packets!" - ) - return packets[0] - - def call(self, tag: Tag, packet: Packet) -> tuple[Tag, Packet]: - packet_key = "" - if ( - not self.skip_tag_record - or not self.skip_memoization_lookup - or not self.skip_memoization - ): - packet_key = self.get_packet_key(packet) - - if not self.skip_tag_record and self.tag_store is not None: - self._add_tag_record_with_packet_key(tag, packet_key) - - if not self.skip_memoization_lookup: - memoized_packet = self._retrieve_memoized_by_hash(packet_key) - else: - memoized_packet = None - if memoized_packet is not None: - logger.info("Memoized packet found, skipping computation") - return tag, memoized_packet - - tag, output_packet = self.function_pod.call(tag, packet) - - if not self.skip_memoization: - # output packet may be modified by the memoization process - # e.g. if the output is a file, the path may be changed - output_packet = self.memoize(packet, output_packet) # type: ignore - - return tag, output_packet - - def get_all_entries_with_tags(self) -> pl.LazyFrame | None: - """ - Retrieve all entries from the tag store with their associated tags. - Returns a DataFrame with columns for tag and packet key. 
- """ - if self.tag_store is None: - raise ValueError("Tag store is not set, cannot retrieve entries") - - tag_records = self.tag_store.get_all_records_as_polars( - self.function_pod.function_name, self.function_pod_hash - ) - if tag_records is None: - return None - result_packets = self.result_store.get_records_by_ids_as_polars( - self.function_pod.function_name, - self.function_pod_hash, - tag_records.collect()["__packet_key"], - preserve_input_order=True, - ) - if result_packets is None: - return None - - return pl.concat([tag_records, result_packets], how="horizontal").drop( - ["__packet_key"] - ) - - def identity_structure(self, *streams) -> Any: - return self.function_pod.identity_structure(*streams) diff --git a/src/orcapod/protocols/__init__.py b/src/orcapod/protocols/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/orcapod/protocols/data_protocols.py b/src/orcapod/protocols/data_protocols.py new file mode 100644 index 0000000..cd21645 --- /dev/null +++ b/src/orcapod/protocols/data_protocols.py @@ -0,0 +1,1683 @@ +from collections.abc import Collection, Iterator, Mapping +from datetime import datetime +from typing import Any, ContextManager, Protocol, Self, TYPE_CHECKING +from orcapod.protocols.hashing_protocols import ContentIdentifiable +from orcapod.types import DataValue, TypeSpec + +if TYPE_CHECKING: + import pyarrow as pa + + +class Datagram(Protocol): + """ + Protocol for immutable datagram containers in Orcapod. + + Datagrams are the fundamental units of data that flow through the system. + They provide a unified interface for data access, conversion, and manipulation, + ensuring consistent behavior across different storage backends (dict, Arrow table, etc.). + + Each datagram contains: + - **Data columns**: The primary business data (user_id, name, etc.) + - **Meta columns**: Internal system metadata with {orcapod.META_PREFIX} ('__') prefixes (__processed_at, etc.) + - **Context column**: Data context information ({orcapod.CONTEXT_KEY}) + + Future Packet subclass will also include: + - **Source info columns**: Data provenance with {orcapod.SOURCE_PREFIX} ('_source_') prefixes (_source_user_id, etc.) + + When exposing to external tools without field metadata support, semantic types + are encoded as `_{semantic_type}_` prefixes (_path_config_file, _id_user_name, etc.). + + All operations are immutable - methods return new datagram instances rather than + modifying existing ones. + + Example: + >>> datagram = DictDatagram({"user_id": 123, "name": "Alice"}) + >>> updated = datagram.update(name="Alice Smith") + >>> filtered = datagram.select("user_id", "name") + >>> table = datagram.as_table() + """ + + # 1. Core Properties (Identity & Structure) + @property + def data_context_key(self) -> str: + """ + Return the data context key for this datagram. + + This key identifies the semantic type registry, arrow hasher, and other + contextual information needed to properly interpret and work with this + datagram across various operations (storage, visualization, processing, etc.). + + Returns: + str: Context key for proper datagram interpretation + """ + ... + + @property + def meta_columns(self) -> tuple[str, ...]: + """Return tuple of meta column names (with {orcapod.META_PREFIX} ('__') prefix).""" + ... + + # 2. Dict-like Interface (Data Access) + def __getitem__(self, key: str) -> DataValue: + """ + Get data column value by key. + + Provides dict-like access to data columns only. 
Meta columns + are not accessible through this method (use `get_meta_value()` instead). + + Args: + key: Data column name. + + Returns: + The value stored in the specified data column. + + Raises: + KeyError: If the column doesn't exist in data columns. + + Example: + >>> datagram["user_id"] + 123 + >>> datagram["name"] + 'Alice' + """ + ... + + def __contains__(self, key: str) -> bool: + """ + Check if data column exists. + + Args: + key: Column name to check. + + Returns: + True if column exists in data columns, False otherwise. + + Example: + >>> "user_id" in datagram + True + >>> "nonexistent" in datagram + False + """ + ... + + def __iter__(self) -> Iterator[str]: + """ + Iterate over data column names. + + Provides for-loop support over column names, enabling natural iteration + patterns without requiring conversion to dict. + + Yields: + Data column names in no particular order. + + Example: + >>> for column in datagram: + ... value = datagram[column] + ... print(f"{column}: {value}") + """ + ... + + def get(self, key: str, default: DataValue = None) -> DataValue: + """ + Get data column value with default fallback. + + Args: + key: Data column name. + default: Value to return if column doesn't exist. + + Returns: + Column value if exists, otherwise the default value. + + Example: + >>> datagram.get("user_id") + 123 + >>> datagram.get("missing", "default") + 'default' + """ + ... + + # 3. Structural Information + def keys( + self, + include_all_info: bool = False, + include_meta_columns: bool | Collection[str] = False, + include_context: bool = False, + ) -> tuple[str, ...]: + """ + Return tuple of column names. + + Provides access to column names with filtering options for different + column types. Default returns only data column names. + + Args: + include_meta_columns: Controls meta column inclusion. + - False: Return only data column names (default) + - True: Include all meta column names + - Collection[str]: Include meta columns matching these prefixes. If absent, + {orcapod.META_PREFIX} ('__') prefix is prepended to each key. + include_context: Whether to include context column. + + Returns: + Tuple of column names based on inclusion criteria. + + Example: + >>> datagram.keys() # Data columns only + ('user_id', 'name', 'email') + >>> datagram.keys(include_meta_columns=True) + ('user_id', 'name', 'email', f'{orcapod.META_PREFIX}processed_at', f'{orcapod.META_PREFIX}pipeline_version') + >>> datagram.keys(include_meta_columns=["pipeline"]) + ('user_id', 'name', 'email',f'{orcapod.META_PREFIX}pipeline_version') + >>> datagram.keys(include_context=True) + ('user_id', 'name', 'email', f'{orcapod.CONTEXT_KEY}') + """ + ... + + def types( + self, + include_all_info: bool = False, + include_meta_columns: bool | Collection[str] = False, + include_context: bool = False, + ) -> TypeSpec: + """ + Return type specification mapping field names to Python types. + + The TypeSpec enables type checking and validation throughout the system. + + Args: + include_meta_columns: Controls meta column type inclusion. + - False: Exclude meta column types (default) + - True: Include all meta column types + - Collection[str]: Include meta column types matching these prefixes. If absent, + {orcapod.META_PREFIX} ('__') prefix is prepended to each key. + include_context: Whether to include context type. + + Returns: + TypeSpec mapping field names to their Python types. + + Example: + >>> datagram.types() + {'user_id': , 'name': } + """ + ... 
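# --- Illustrative sketch (editorial note, not part of the patch above) ---------
# A minimal stand-in showing how the read-only portion of the Datagram protocol
# documented above is expected to behave. `SimpleDatagram` is a hypothetical
# example class, not orcapod's real DictDatagram; meta columns, context, and
# Arrow conversion are omitted for brevity.
from collections.abc import Iterator


class SimpleDatagram:
    def __init__(self, data: dict[str, object]):
        self._data = dict(data)  # copy so callers cannot mutate the datagram

    def __getitem__(self, key: str) -> object:
        return self._data[key]  # KeyError if the data column is missing

    def __contains__(self, key: str) -> bool:
        return key in self._data

    def __iter__(self) -> Iterator[str]:
        return iter(self._data)  # iterate over data column names

    def get(self, key: str, default: object = None) -> object:
        return self._data.get(key, default)

    def keys(self) -> tuple[str, ...]:
        return tuple(self._data)

    def types(self) -> dict[str, type]:
        return {k: type(v) for k, v in self._data.items()}


# Usage mirroring the docstring examples above:
dg = SimpleDatagram({"user_id": 123, "name": "Alice"})
assert dg["user_id"] == 123 and "name" in dg
assert dg.keys() == ("user_id", "name")
assert dg.types() == {"user_id": int, "name": str}
# --------------------------------------------------------------------------------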
+ + def arrow_schema( + self, + include_all_info: bool = False, + include_meta_columns: bool | Collection[str] = False, + include_context: bool = False, + ) -> "pa.Schema": + """ + Return PyArrow schema representation. + + The schema provides structured field and type information for efficient + serialization and deserialization with PyArrow. + + Args: + include_meta_columns: Controls meta column schema inclusion. + - False: Exclude meta columns (default) + - True: Include all meta columns + - Collection[str]: Include meta columns matching these prefixes. If absent, + {orcapod.META_PREFIX} ('__') prefix is prepended to each key. + include_context: Whether to include context column. + + Returns: + PyArrow Schema describing the datagram structure. + + Example: + >>> schema = datagram.arrow_schema() + >>> schema.names + ['user_id', 'name'] + """ + ... + + def content_hash(self) -> str: + """ + Return deterministic hash of datagram content. + + The hash should reflect the data content, typically excluding meta columns + and context. Used for caching, comparison, and deduplication. For exact details of + hash computation, refer to the implementation in the specific datagram class/subclass. + + Returns: + Deterministic content hash string. + + Note: + Two datagrams with identical data columns will have the same hash, + even if they differ in meta columns or context. + + Example: + >>> datagram.content_hash() + 'sha256:abc123def456...' + """ + ... + + # 4. Format Conversions (Export) + def as_dict( + self, + include_all_info: bool = False, + include_meta_columns: bool | Collection[str] = False, + include_context: bool = False, + ) -> dict[str, DataValue]: + """ + Convert datagram to dictionary format. + + Provides a simple key-value representation useful for debugging, + serialization, and interop with dict-based APIs. + + Args: + include_meta_columns: Controls meta column inclusion. + - False: Exclude all meta columns (default) + - True: Include all meta columns + - Collection[str]: Include meta columns matching these prefixes. If absent, + {orcapod.META_PREFIX} ('__') prefix is prepended to each key. + include_context: Whether to include the context key. + include_all_info: If True, include all available information. This option supersedes all other inclusion options. + + + Returns: + Dictionary with requested columns as key-value pairs. + + Example: + >>> data = datagram.as_dict() # {'user_id': 123, 'name': 'Alice'} + >>> full_data = datagram.as_dict( + ... include_meta_columns=True, + ... include_context=True + ... ) + """ + ... + + def as_table( + self, + include_all_info: bool = False, + include_meta_columns: bool | Collection[str] = False, + include_context: bool = False, + ) -> "pa.Table": + """ + Convert datagram to PyArrow Table format. + + Provides a standardized columnar representation suitable for analysis, + processing, and interoperability with Arrow-based tools. + + Args: + include_meta_columns: Controls meta column inclusion. + - False: Exclude all meta columns (default) + - True: Include all meta columns + - Collection[str]: Include meta columns matching these prefixes. If absent, + {orcapod.META_PREFIX} ('__') prefix is prepended to each key. + include_context: Whether to include the context column. + include_all_info: If True, include all available information. This option supersedes all other inclusion options. + + Returns: + PyArrow Table with requested columns. + + Example: + >>> table = datagram.as_table() # Data columns only + >>> full_table = datagram.as_table( + ... 
include_meta_columns=True, + ... include_context=True + ... ) + >>> filtered = datagram.as_table(include_meta_columns=["pipeline"]) # same as passing f"{orcapod.META_PREFIX}pipeline" + """ + ... + + # TODO: add this back + # def as_arrow_compatible_dict( + # self, + # include_all_info: bool = False, + # include_meta_columns: bool | Collection[str] = False, + # include_context: bool = False, + # ) -> dict[str, Any]: + # """ + # Return dictionary with values optimized for Arrow table conversion. + + # This method returns a dictionary where values are in a form that can be + # efficiently converted to Arrow format using pa.Table.from_pylist(). + + # The key insight is that this avoids the expensive as_table() → concat pattern + # by providing values that are "Arrow-ready" while remaining in dict format + # for efficient batching. + + # Implementation note: This may involve format conversions (e.g., Path objects + # to strings, datetime objects to ISO strings, etc.) to ensure compatibility + # with Arrow's expected input formats. + + # Arrow table that results from pa.Table.from_pylist on the output of this should be accompanied + # with arrow_schema(...) with the same argument options to ensure that the schema matches the table. + + # Args: + # include_all_info: Include all available information + # include_meta_columns: Controls meta column inclusion + # include_context: Whether to include context key + + # Returns: + # Dictionary with values optimized for Arrow conversion + + # Example: + # # Efficient batch conversion pattern + # arrow_dicts = [datagram.as_arrow_compatible_dict() for datagram in datagrams] + # schema = datagrams[0].arrow_schema() + # table = pa.Table.from_pylist(arrow_dicts, schema=schema) + # """ + # ... + + # 5. Meta Column Operations + def get_meta_value(self, key: str, default: DataValue = None) -> DataValue: + """ + Get meta column value with optional default. + + Meta columns store operational metadata and use {orcapod.META_PREFIX} ('__') prefixes. + This method handles both prefixed and unprefixed key formats. + + Args: + key: Meta column key (with or without {orcapod.META_PREFIX} ('__') prefix). + default: Value to return if meta column doesn't exist. + + Returns: + Meta column value if exists, otherwise the default value. + + Example: + >>> datagram.get_meta_value("pipeline_version") # Auto-prefixed + 'v2.1.0' + >>> datagram.get_meta_value("__pipeline_version") # Already prefixed + 'v2.1.0' + >>> datagram.get_meta_value("missing", "default") + 'default' + """ + ... + + def with_meta_columns(self, **updates: DataValue) -> Self: + """ + Create new datagram with updated meta columns. + + Adds or updates operational metadata while preserving all data columns. + Keys are automatically prefixed with {orcapod.META_PREFIX} ('__') if needed. + + Args: + **updates: Meta column updates as keyword arguments. + + Returns: + New datagram instance with updated meta columns. + + Example: + >>> tracked = datagram.with_meta_columns( + ... processed_by="pipeline_v2", + ... timestamp="2024-01-15T10:30:00Z" + ... ) + """ + ... + + def drop_meta_columns(self, *keys: str, ignore_missing: bool = False) -> Self: + """ + Create new datagram with specified meta columns removed. + + Args: + *keys: Meta column keys to remove (prefixes optional). + ignore_missing: If True, ignore missing columns without raising an error. + + + Returns: + New datagram instance without specified meta columns. + + Raises: + KeryError: If any specified meta column to drop doesn't exist and ignore_missing=False. 
+ + Example: + >>> cleaned = datagram.drop_meta_columns("old_source", "temp_debug") + """ + ... + + # 6. Data Column Operations + def select(self, *column_names: str) -> Self: + """ + Create new datagram with only specified data columns. + + Args: + *column_names: Data column names to keep. + + + Returns: + New datagram instance with only specified data columns. All other columns including + meta columns and context are preserved. + + Raises: + KeyError: If any specified column doesn't exist. + + Example: + >>> subset = datagram.select("user_id", "name", "email") + """ + ... + + def drop(self, *column_names: str, ignore_missing: bool = False) -> Self: + """ + Create new datagram with specified data columns removed. Note that this does not + remove meta columns or context column. Refer to `drop_meta_columns()` for dropping + specific meta columns. Context key column can never be dropped but a modified copy + can be created with a different context key using `with_data_context()`. + + Args: + *column_names: Data column names to remove. + ignore_missing: If True, ignore missing columns without raising an error. + + Returns: + New datagram instance without specified data columns. + + Raises: + KeryError: If any specified column to drop doesn't exist and ignore_missing=False. + + Example: + >>> filtered = datagram.drop("temp_field", "debug_info") + """ + ... + + def rename( + self, + column_mapping: Mapping[str, str], + ) -> Self: + """ + Create new datagram with data columns renamed. + + Args: + column_mapping: Mapping from old names to new names. + + Returns: + New datagram instance with renamed data columns. + + Example: + >>> renamed = datagram.rename( + ... {"old_id": "user_id", "old_name": "full_name"}, + ... column_types={"user_id": int} + ... ) + """ + ... + + def update(self, **updates: DataValue) -> Self: + """ + Create new datagram with existing column values updated. + + Updates values in existing data columns. Will error if any specified + column doesn't exist - use with_columns() to add new columns. + + Args: + **updates: Column names and their new values. + + Returns: + New datagram instance with updated values. + + Raises: + KeyError: If any specified column doesn't exist. + + Example: + >>> updated = datagram.update( + ... file_path="/new/absolute/path.txt", + ... status="processed" + ... ) + """ + ... + + def with_columns( + self, + column_types: Mapping[str, type] | None = None, + **updates: DataValue, + ) -> Self: + """ + Create new datagram with additional data columns. + + Adds new data columns to the datagram. Will error if any specified + column already exists - use update() to modify existing columns. + + Args: + column_types: Optional type specifications for new columns. If not provided, the column type is + inferred from the provided values. If value is None, the column type defaults to `str`. + **kwargs: New columns as keyword arguments. + + Returns: + New datagram instance with additional data columns. + + Raises: + ValueError: If any specified column already exists. + + Example: + >>> expanded = datagram.with_columns( + ... status="active", + ... score=95.5, + ... column_types={"score": float} + ... ) + """ + ... + + # 7. Context Operations + def with_context_key(self, new_context_key: str) -> Self: + """ + Create new datagram with different context key. + + Changes the semantic interpretation context while preserving all data. + The context key affects how columns are processed and converted. + + Args: + new_context_key: New context key string. 
+ + Returns: + New datagram instance with updated context key. + + Note: + How the context is interpreted depends on the datagram implementation. + Semantic processing may be rebuilt for the new context. + + Example: + >>> financial_datagram = datagram.with_context_key("financial_v1") + """ + ... + + # 8. Utility Operations + def copy(self) -> Self: + """ + Create a shallow copy of the datagram. + + Returns a new datagram instance with the same data and cached values. + This is more efficient than reconstructing from scratch when you need + an identical datagram instance. + + Returns: + New datagram instance with copied data and caches. + + Example: + >>> copied = datagram.copy() + >>> copied is datagram # False - different instance + False + """ + ... + + # 9. String Representations + def __str__(self) -> str: + """ + Return user-friendly string representation. + + Shows the datagram as a simple dictionary for user-facing output, + messages, and logging. Only includes data columns for clean output. + + Returns: + Dictionary-style string representation of data columns only. + """ + ... + + def __repr__(self) -> str: + """ + Return detailed string representation for debugging. + + Shows the datagram type and comprehensive information for debugging. + + Returns: + Detailed representation with type and metadata information. + """ + ... + + +class Tag(Datagram, Protocol): + """ + Metadata associated with each data item in a stream. + + Tags carry contextual information about data packets as they flow through + the computational graph. They are immutable and provide metadata that + helps with: + - Data lineage tracking + - Grouping and aggregation operations + - Temporal information (timestamps) + - Source identification + - Processing context + + Common examples include: + - Timestamps indicating when data was created/processed + - Source identifiers showing data origin + - Processing metadata like batch IDs or session information + - Grouping keys for aggregation operations + - Quality indicators or confidence scores + """ + + pass + + +class Packet(Datagram, Protocol): + """ + The actual data payload in a stream. + + Packets represent the core data being processed through the computational + graph. Unlike Tags (which are metadata), Packets contain the actual + information that computations operate on. + + Packets extend Datagram with additional capabilities for: + - Source tracking and lineage + - Content-based hashing for caching + - Metadata inclusion for debugging + + The distinction between Tag and Packet is crucial for understanding + data flow: Tags provide context, Packets provide content. + """ + + def keys( + self, + include_all_info: bool = False, + include_meta_columns: bool | Collection[str] = False, + include_context: bool = False, + include_source: bool = False, + ) -> tuple[str, ...]: + """ + Return tuple of column names. + + Provides access to column names with filtering options for different + column types. Default returns only data column names. + + Args: + include_all_info: If True, include all available information. This option supersedes all other inclusion options. + include_meta_columns: Controls meta column inclusion. + - False: Return only data column names (default) + - True: Include all meta column names + - Collection[str]: Include meta columns matching these prefixes. If absent, + {orcapod.META_PREFIX} ('__') prefix is prepended to each key. + include_context: Whether to include context column. + include_source: Whether to include source info fields. 
+ + + Returns: + Tuple of column names based on inclusion criteria. + + Example: + >>> datagram.keys() # Data columns only + ('user_id', 'name', 'email') + >>> datagram.keys(include_meta_columns=True) + ('user_id', 'name', 'email', f'{orcapod.META_PREFIX}processed_at', f'{orcapod.META_PREFIX}pipeline_version') + >>> datagram.keys(include_meta_columns=["pipeline"]) + ('user_id', 'name', 'email',f'{orcapod.META_PREFIX}pipeline_version') + >>> datagram.keys(include_context=True) + ('user_id', 'name', 'email', f'{orcapod.CONTEXT_KEY}') + """ + ... + + def types( + self, + include_all_info: bool = False, + include_meta_columns: bool | Collection[str] = False, + include_context: bool = False, + include_source: bool = False, + ) -> TypeSpec: + """ + Return type specification mapping field names to Python types. + + The TypeSpec enables type checking and validation throughout the system. + + Args: + include_all_info: If True, include all available information. This option supersedes all other inclusion options. + include_meta_columns: Controls meta column type inclusion. + - False: Exclude meta column types (default) + - True: Include all meta column types + - Collection[str]: Include meta column types matching these prefixes. If absent, + {orcapod.META_PREFIX} ('__') prefix is prepended to each key. + include_context: Whether to include context type. + include_source: Whether to include source info fields. + + Returns: + TypeSpec mapping field names to their Python types. + + Example: + >>> datagram.types() + {'user_id': , 'name': } + """ + ... + + def arrow_schema( + self, + include_all_info: bool = False, + include_meta_columns: bool | Collection[str] = False, + include_context: bool = False, + include_source: bool = False, + ) -> "pa.Schema": + """ + Return PyArrow schema representation. + + The schema provides structured field and type information for efficient + serialization and deserialization with PyArrow. + + Args: + include_all_info: If True, include all available information. This option supersedes all other inclusion options. + include_meta_columns: Controls meta column schema inclusion. + - False: Exclude meta columns (default) + - True: Include all meta columns + - Collection[str]: Include meta columns matching these prefixes. If absent, + {orcapod.META_PREFIX} ('__') prefix is prepended to each key. + include_context: Whether to include context column. + include_source: Whether to include source info fields. + + + Returns: + PyArrow Schema describing the datagram structure. + + Example: + >>> schema = datagram.arrow_schema() + >>> schema.names + ['user_id', 'name'] + """ + ... + + def as_dict( + self, + include_all_info: bool = False, + include_meta_columns: bool | Collection[str] = False, + include_context: bool = False, + include_source: bool = False, + ) -> dict[str, DataValue]: + """ + Convert datagram to dictionary format. + + Provides a simple key-value representation useful for debugging, + serialization, and interop with dict-based APIs. + + Args: + include_all_info: If True, include all available information. This option supersedes all other inclusion options. + include_meta_columns: Controls meta column inclusion. + - False: Exclude all meta columns (default) + - True: Include all meta columns + - Collection[str]: Include meta columns matching these prefixes. If absent, + {orcapod.META_PREFIX} ('__') prefix is prepended to each key. + include_context: Whether to include the context key. + include_source: Whether to include source info fields. 
+ + + Returns: + Dictionary with requested columns as key-value pairs. + + Example: + >>> data = datagram.as_dict() # {'user_id': 123, 'name': 'Alice'} + >>> full_data = datagram.as_dict( + ... include_meta_columns=True, + ... include_context=True + ... ) + """ + ... + + def as_table( + self, + include_all_info: bool = False, + include_meta_columns: bool | Collection[str] = False, + include_context: bool = False, + include_source: bool = False, + ) -> "pa.Table": + """ + Convert datagram to PyArrow Table format. + + Provides a standardized columnar representation suitable for analysis, + processing, and interoperability with Arrow-based tools. + + Args: + include_all_info: If True, include all available information. This option supersedes all other inclusion options. + include_meta_columns: Controls meta column inclusion. + - False: Exclude all meta columns (default) + - True: Include all meta columns + - Collection[str]: Include meta columns matching these prefixes. If absent, + {orcapod.META_PREFIX} ('__') prefix is prepended to each key. + include_context: Whether to include the context column. + include_source: Whether to include source info columns in the schema. + + Returns: + PyArrow Table with requested columns. + + Example: + >>> table = datagram.as_table() # Data columns only + >>> full_table = datagram.as_table( + ... include_meta_columns=True, + ... include_context=True + ... ) + >>> filtered = datagram.as_table(include_meta_columns=["pipeline"]) # same as passing f"{orcapod.META_PREFIX}pipeline" + """ + ... + + # TODO: add this back + # def as_arrow_compatible_dict( + # self, + # include_all_info: bool = False, + # include_meta_columns: bool | Collection[str] = False, + # include_context: bool = False, + # include_source: bool = False, + # ) -> dict[str, Any]: + # """Extended version with source info support.""" + # ... + + def as_datagram( + self, + include_all_info: bool = False, + include_meta_columns: bool | Collection[str] = False, + include_source: bool = False, + ) -> Datagram: + """ + Convert the packet to a Datagram. + + Args: + include_meta_columns: Controls meta column inclusion. + - False: Exclude all meta columns (default) + - True: Include all meta columns + - Collection[str]: Include meta columns matching these prefixes. If absent, + {orcapod.META_PREFIX} ('__') prefix is prepended to each key. + + Returns: + Datagram: Datagram representation of packet data + """ + ... + + def source_info(self) -> dict[str, str | None]: + """ + Return metadata about the packet's source/origin. + + Provides debugging and lineage information about where the packet + originated. May include information like: + - File paths for file-based sources + - Database connection strings + - API endpoints + - Processing pipeline information + + Returns: + dict[str, str | None]: Source information for each data column as key-value pairs. + """ + ... + + +class PodFunction(Protocol): + """ + A function suitable for use in a FunctionPod. + + PodFunctions define the computational logic that operates on individual + packets within a Pod. They represent pure functions that transform + data values without side effects. + + These functions are designed to be: + - Stateless: No dependency on external state + - Deterministic: Same inputs always produce same outputs + - Serializable: Can be cached and distributed + - Type-safe: Clear input/output contracts + + PodFunctions accept named arguments corresponding to packet fields + and return transformed data values. 
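    Example (illustrative only; `keep_positive` is a hypothetical user-defined
    function, not part of orcapod):
        >>> def keep_positive(value: int) -> int | None:
        ...     # Named packet fields come in as keyword arguments;
        ...     # returning None filters the packet out of the output.
        ...     return value if value > 0 else None
        >>> keep_positive(value=5)
        5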
+ """ + + def __call__(self, **kwargs: DataValue) -> None | DataValue: + """ + Execute the pod function with the given arguments. + + The function receives packet data as named arguments and returns + either transformed data or None (for filtering operations). + + Args: + **kwargs: Named arguments mapping packet fields to data values + + Returns: + None: Filter out this packet (don't include in output) + DataValue: Single transformed value + + Raises: + TypeError: If required arguments are missing + ValueError: If argument values are invalid + """ + ... + + +class Labelable(Protocol): + """ + Protocol for objects that can have a human-readable label. + + Labels provide meaningful names for objects in the computational graph, + making debugging, visualization, and monitoring much easier. They serve + as human-friendly identifiers that complement the technical identifiers + used internally. + + Labels are optional but highly recommended for: + - Debugging complex computational graphs + - Visualization and monitoring tools + - Error messages and logging + - User interfaces and dashboards + """ + + @property + def label(self) -> str | None: + """ + Return the human-readable label for this object. + + Labels should be descriptive and help users understand the purpose + or role of the object in the computational graph. + + Returns: + str: Human-readable label for this object + None: No label is set (will use default naming) + """ + ... + + +class Stream(ContentIdentifiable, Labelable, Protocol): + """ + Base protocol for all streams in Orcapod. + + Streams represent sequences of (Tag, Packet) pairs flowing through the + computational graph. They are the fundamental data structure connecting + kernels and carrying both data and metadata. + + Streams can be either: + - Static: Immutable snapshots created at a specific point in time + - Live: Dynamic streams that stay current with upstream dependencies + + All streams provide: + - Iteration over (tag, packet) pairs + - Type information and schema access + - Lineage information (source kernel and upstream streams) + - Basic caching and freshness tracking + - Conversion to common formats (tables, dictionaries) + """ + + @property + def source(self) -> "Kernel | None": + """ + The kernel that produced this stream. + + This provides lineage information for tracking data flow through + the computational graph. Root streams (like file sources) may + have no source kernel. + + Returns: + Kernel: The source kernel that created this stream + None: This is a root stream with no source kernel + """ + ... + + @property + def upstreams(self) -> tuple["Stream", ...]: + """ + Input streams used to produce this stream. + + These are the streams that were provided as input to the source + kernel when this stream was created. Used for dependency tracking + and cache invalidation. + + Returns: + tuple[Stream, ...]: Upstream dependency streams (empty for sources) + """ + ... + + def keys(self) -> tuple[tuple[str, ...], tuple[str, ...]]: + """ + Available keys/fields in the stream content. + + Returns the field names present in both tags and packets. + This provides schema information without requiring type details, + useful for: + - Schema inspection and exploration + - Query planning and optimization + - Field validation and mapping + + Returns: + tuple[tuple[str, ...], tuple[str, ...]]: (tag_keys, packet_keys) + """ + ... + + def types(self) -> tuple[TypeSpec, TypeSpec]: + """ + Type specifications for the stream content. 
+ + Returns the type schema for both tags and packets in this stream. + This information is used for: + - Type checking and validation + - Schema inference and planning + - Compatibility checking between kernels + + Returns: + tuple[TypeSpec, TypeSpec]: (tag_types, packet_types) + """ + ... + + @property + def last_modified(self) -> datetime | None: + """ + When the stream's content was last modified. + + This property is crucial for caching decisions and dependency tracking: + - datetime: Content was last modified at this time (cacheable) + - None: Content is never stable, always recompute (some dynamic streams) + + Both static and live streams typically return datetime values, but + live streams update this timestamp whenever their content changes. + + Returns: + datetime: Timestamp of last modification for most streams + None: Stream content is never stable (some special dynamic streams) + """ + ... + + @property + def is_current(self) -> bool: + """ + Whether the stream is up-to-date with its dependencies. + + A stream is current if its content reflects the latest state of its + source kernel and upstream streams. This is used for cache validation + and determining when refresh is needed. + + For live streams, this should always return True since they stay + current automatically. For static streams, this indicates whether + the cached content is still valid. + + Returns: + bool: True if stream is up-to-date, False if refresh needed + """ + ... + + def __iter__(self) -> Iterator[tuple[Tag, Packet]]: + """ + Iterate over (tag, packet) pairs in the stream. + + This is the primary way to access stream data. The behavior depends + on the stream type: + - Static streams: Return cached/precomputed data + - Live streams: May trigger computation and always reflect current state + + Yields: + tuple[Tag, Packet]: Sequential (tag, packet) pairs + """ + ... + + def iter_packets(self) -> Iterator[tuple[Tag, Packet]]: + """ + Alias for __iter__ for explicit packet iteration. + + Provides a more explicit method name when the intent is to iterate + over packets specifically, improving code readability. + + This method must return an immutable iterator -- that is, the returned iterator + should not change and must consistently return identical tag,packet pairs across + multiple iterations of the iterator. + + Note that this is NOT to mean that multiple invocation of `iter_packets` must always + return an identical iterator. The iterator returned by `iter_packets` may change + between invocations, but the iterator itself must not change. Consequently, it should be understood + that the returned iterators may be a burden on memory if the stream is large or infinite. + + Yields: + tuple[Tag, Packet]: Sequential (tag, packet) pairs + """ + ... + + def as_table( + self, + include_data_context: bool = False, + include_source: bool = False, + include_content_hash: bool | str = False, + ) -> "pa.Table": + """ + Convert the entire stream to a PyArrow Table. + + Materializes all (tag, packet) pairs into a single table for + analysis and processing. This operation may be expensive for + large streams or live streams that need computation. + + If include_content_hash is True, an additional column called "_content_hash" + containing the content hash of each packet is included. If include_content_hash + is a string, it is used as the name of the content hash column. + + Returns: + pa.Table: Complete stream data as a PyArrow Table + """ + ... 
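# --- Illustrative sketch (editorial note, not part of the patch above) ---------
# How downstream code is expected to consume an object satisfying the Stream
# protocol: iterate (tag, packet) pairs, or materialize everything via as_table().
# `summarize_stream` and the plain dicts standing in for Tag/Packet are
# assumptions for illustration; real streams yield Datagram-based objects.
from collections.abc import Iterable


def summarize_stream(pairs: Iterable[tuple[dict, dict]]) -> dict[str, int]:
    """Count packets and collect the packet field names seen in a stream."""
    count = 0
    fields: set[str] = set()
    for _tag, packet in pairs:
        count += 1
        fields.update(packet)  # dict-like iteration yields data column names
    return {"n_packets": count, "n_fields": len(fields)}


# Usage with plain dicts standing in for Tag/Packet:
pairs = [({"id": i}, {"x": i, "y": i + 1}) for i in range(3)]
print(summarize_stream(pairs))  # {'n_packets': 3, 'n_fields': 2}
# --------------------------------------------------------------------------------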
+ + def flow(self) -> Collection[tuple[Tag, Packet]]: + """ + Return the entire stream as a collection of (tag, packet) pairs. + + This method materializes the stream content into a list or similar + collection type. It is useful for small streams or when you need + to process all data at once. + + Returns: + Collection[tuple[Tag, Packet]]: All (tag, packet) pairs in the stream + """ + ... + + +class LiveStream(Stream, Protocol): + """ + A stream that automatically stays up-to-date with its upstream dependencies. + + LiveStream extends the base Stream protocol with capabilities for "up-to-date" + data flow and reactive computation. Unlike static streams which represent + snapshots, LiveStreams provide the guarantee that their content always + reflects the current state of their dependencies. + + Key characteristics: + - Automatically refresh the stream if changes in the upstreams are detected + - Track last_modified timestamp when content changes + - Support manual refresh triggering and invalidation + - By design, LiveStream would return True for is_current except when auto-update fails. + + LiveStreams are always returned by Kernel.__call__() methods, ensuring + that normal kernel usage produces live, up-to-date results. + + Caching behavior: + - last_modified updates whenever content changes + - Can be cached based on dependency timestamps + - Invalidation happens automatically when upstreams change + + Use cases: + - Real-time data processing pipelines + - Reactive user interfaces + - Monitoring and alerting systems + - Dynamic dashboard updates + - Any scenario requiring current data + """ + + def refresh(self, force: bool = False) -> bool: + """ + Manually trigger a refresh of this stream's content. + + Forces the stream to check its upstream dependencies and update + its content if necessary. This is useful when: + - You want to ensure the latest data before a critical operation + - You need to force computation at a specific time + - You're debugging data flow issues + - You want to pre-compute results for performance + Args: + force: If True, always refresh even if the stream is current. + If False, only refresh if the stream is not current. + + Returns: + bool: True if the stream was refreshed, False if it was already current. + Note: LiveStream refreshes automatically on access, so this + method may be a no-op for some implementations. However, it's + always safe to call if you need to control when the cache is refreshed. + """ + ... + + def invalidate(self) -> None: + """ + Mark this stream as invalid, forcing a refresh on next access. + + This method is typically called when: + - Upstream dependencies have changed + - The source kernel has been modified + - External data sources have been updated + - Manual cache invalidation is needed + + The stream will automatically refresh its content the next time + it's accessed (via iteration, as_table(), etc.). + + This is more efficient than immediate refresh when you know the + data will be accessed later. + """ + ... + + +class Kernel(ContentIdentifiable, Labelable, Protocol): + """ + The fundamental unit of computation in Orcapod. + + Kernels are the building blocks of computational graphs, transforming + zero, one, or more input streams into a single output stream. They + encapsulate computation logic while providing consistent interfaces + for validation, type checking, and execution. 
+ + Key design principles: + - Immutable: Kernels don't change after creation + - Deterministic: Same inputs always produce same outputs + - Composable: Kernels can be chained and combined + - Trackable: All invocations are recorded for lineage + - Type-safe: Strong typing and validation throughout + + Execution modes: + - __call__(): Full-featured execution with tracking, returns LiveStream + - forward(): Pure computation without side effects, returns Stream + + The distinction between these modes enables both production use (with + full tracking) and testing/debugging (without side effects). + """ + + @property + def kernel_id(self) -> tuple[str, ...]: + """ + Return a unique identifier for this Pod. + + The pod_id is used for caching and tracking purposes. It should + uniquely identify the Pod's computational logic, parameters, and + any relevant metadata that affects its behavior. + + Returns: + tuple[str, ...]: Unique identifier for this Pod + """ + ... + + @property + def data_context_key(self) -> str: + """ + Return the context key for this kernel's data processing. + + The context key is used to interpret how data columns should be + processed and converted. It provides semantic meaning to the data + being processed by this kernel. + + Returns: + str: Context key for this kernel's data processing + """ + ... + + def __call__( + self, *streams: Stream, label: str | None = None, **kwargs + ) -> LiveStream: + """ + Main interface for kernel invocation with full tracking and guarantees. + + This is the primary way to invoke kernels in production. It provides + a complete execution pipeline: + 1. Validates input streams against kernel requirements + 2. Registers the invocation with the computational graph + 3. Calls forward() to perform the actual computation + 4. Ensures the result is a LiveStream that stays current + + The returned LiveStream automatically stays up-to-date with its + upstream dependencies, making it suitable for real-time processing + and reactive applications. + + Args: + *streams: Input streams to process (can be empty for source kernels) + label: Optional label for this invocation (overrides kernel.label) + **kwargs: Additional arguments for kernel configuration + + Returns: + LiveStream: Live stream that stays up-to-date with upstreams + + Raises: + ValidationError: If input streams are invalid for this kernel + TypeMismatchError: If stream types are incompatible + ValueError: If required arguments are missing + """ + ... + + def forward(self, *streams: Stream) -> Stream: + """ + Perform the actual computation without side effects. + + This method contains the core computation logic and should be + overridden by subclasses. It performs pure computation without: + - Registering with the computational graph + - Performing validation (caller's responsibility) + - Guaranteeing result type (may return static or live streams) + + The returned stream must be accurate at the time of invocation but + need not stay up-to-date with upstream changes. This makes forward() + suitable for: + - Testing and debugging + - Batch processing where currency isn't required + - Internal implementation details + + Args: + *streams: Input streams to process + + Returns: + Stream: Result of the computation (may be static or live) + """ + ... + + def output_types(self, *streams: Stream) -> tuple[TypeSpec, TypeSpec]: + """ + Determine output types without triggering computation. 
+ + This method performs type inference based on input stream types, + enabling efficient type checking and stream property queries. + It should be fast and not trigger any expensive computation. + + Used for: + - Pre-execution type validation + - Query planning and optimization + - Schema inference in complex pipelines + - IDE support and developer tooling + + Args: + *streams: Input streams to analyze + + Returns: + tuple[TypeSpec, TypeSpec]: (tag_types, packet_types) for output + + Raises: + ValidationError: If input types are incompatible + TypeError: If stream types cannot be processed + """ + ... + + def validate_inputs(self, *streams: Stream) -> None: + """ + Validate input streams, raising exceptions if incompatible. + + This method is called automatically by __call__ before computation + to provide fail-fast behavior. It should check: + - Number of input streams + - Stream types and schemas + - Any kernel-specific requirements + - Business logic constraints + + The goal is to catch errors early, before expensive computation + begins, and provide clear error messages for debugging. + + Args: + *streams: Input streams to validate + + Raises: + ValidationError: If streams are invalid for this kernel + TypeError: If stream types are incompatible + ValueError: If stream content violates business rules + """ + ... + + def identity_structure(self, streams: Collection[Stream] | None = None) -> Any: + """ + Generate a unique identity structure for this kernel and/or kernel invocation. + When invoked without streams, it should return a structure + that uniquely identifies the kernel itself (e.g., class name, parameters). + When invoked with streams, it should include the identity of the streams + to distinguish different invocations of the same kernel. + + This structure is used for: + - Caching and memoization + - Debugging and error reporting + - Tracking kernel invocations in computational graphs + + Args: + streams: Optional input streams for this invocation. If None, identity_structure is + based solely on the kernel. If streams are provided, they are included in the identity + to differentiate between different invocations of the same kernel. + + Returns: + Any: Unique identity structure (e.g., tuple of class name and stream identities) + """ + ... + + +class Pod(Kernel, Protocol): + """ + Specialized kernel for packet-level processing with advanced caching. + + Pods represent a different computational model from regular kernels: + - Process data one packet at a time (enabling fine-grained parallelism) + - Support just-in-time evaluation (computation deferred until needed) + - Provide stricter type contracts (clear input/output schemas) + - Enable advanced caching strategies (packet-level caching) + + The Pod abstraction is ideal for: + - Expensive computations that benefit from caching + - Operations that can be parallelized at the packet level + - Transformations with strict type contracts + - Processing that needs to be deferred until access time + - Functions that operate on individual data items + + Pods use a different execution model where computation is deferred + until results are actually needed, enabling efficient resource usage + and fine-grained caching. + """ + + def input_packet_types(self) -> TypeSpec: + """ + TypeSpec for input packets that this Pod can process. + + Defines the exact schema that input packets must conform to. 
+ Pods are typically much stricter about input types than regular + kernels, requiring precise type matching for their packet-level + processing functions. + + This specification is used for: + - Runtime type validation + - Compile-time type checking + - Schema inference and documentation + - Input validation and error reporting + + Returns: + TypeSpec: Dictionary mapping field names to required packet types + """ + ... + + def output_packet_types(self) -> TypeSpec: + """ + TypeSpec for output packets that this Pod produces. + + Defines the schema of packets that will be produced by this Pod. + This is typically determined by the Pod's computational function + and is used for: + - Type checking downstream kernels + - Schema inference in complex pipelines + - Query planning and optimization + - Documentation and developer tooling + + Returns: + TypeSpec: Dictionary mapping field names to output packet types + """ + ... + + def call(self, tag: Tag, packet: Packet) -> tuple[Tag, Packet | None]: + """ + Process a single packet with its associated tag. + + This is the core method that defines the Pod's computational behavior. + It processes one (tag, packet) pair at a time, enabling: + - Fine-grained caching at the packet level + - Parallelization opportunities + - Just-in-time evaluation + - Filtering operations (by returning None) + + The method signature supports: + - Tag transformation (modify metadata) + - Packet transformation (modify content) + - Filtering (return None to exclude packet) + - Pass-through (return inputs unchanged) + + Args: + tag: Metadata associated with the packet + packet: The data payload to process + + Returns: + tuple[Tag, Packet | None]: + - Tag: Output tag (may be modified from input) + - Packet: Processed packet, or None to filter it out + + Raises: + TypeError: If packet doesn't match input_packet_types + ValueError: If packet data is invalid for processing + """ + ... + + +class Source(Kernel, Stream, Protocol): + """ + Entry point for data into the computational graph. + + Sources are special objects that serve dual roles: + - As Kernels: Can be invoked to produce streams + - As Streams: Directly provide data without upstream dependencies + + Sources represent the roots of computational graphs and typically + interface with external data sources. They bridge the gap between + the outside world and the Orcapod computational model. + + Common source types: + - File readers (CSV, JSON, Parquet, etc.) + - Database connections and queries + - API endpoints and web services + - Generated data sources (synthetic data) + - Manual data input and user interfaces + - Message queues and event streams + + Sources have unique properties: + - No upstream dependencies (upstreams is empty) + - Can be both invoked and iterated + - Serve as the starting point for data lineage + - May have their own refresh/update mechanisms + """ + + pass + + +class Tracker(Protocol): + """ + Records kernel invocations and stream creation for computational graph tracking. + + Trackers are responsible for maintaining the computational graph by recording + relationships between kernels, streams, and invocations. They enable: + - Lineage tracking and data provenance + - Caching and memoization strategies + - Debugging and error analysis + - Performance monitoring and optimization + - Reproducibility and auditing + + Multiple trackers can be active simultaneously, each serving different + purposes (e.g., one for caching, another for debugging, another for + monitoring). 
This allows for flexible and composable tracking strategies. + + Trackers can be selectively activated/deactivated to control overhead + and focus on specific aspects of the computational graph. + """ + + def set_active(self, active: bool = True) -> None: + """ + Set the active state of the tracker. + + When active, the tracker will record all kernel invocations and + stream creations. When inactive, no recording occurs, reducing + overhead for performance-critical sections. + + Args: + active: True to activate recording, False to deactivate + """ + ... + + def is_active(self) -> bool: + """ + Check if the tracker is currently recording invocations. + + Returns: + bool: True if tracker is active and recording, False otherwise + """ + ... + + def record_kernel_invocation( + self, kernel: Kernel, upstreams: tuple[Stream, ...], label: str | None = None + ) -> None: + """ + Record a kernel invocation in the computational graph. + + This method is called whenever a kernel is invoked. The tracker + should record: + - The kernel and its properties + - The input streams that were used as input + - Timing and performance information + - Any relevant metadata + + Args: + kernel: The kernel that was invoked + upstreams: The input streams used for this invocation + """ + ... + + def record_pod_invocation( + self, pod: Pod, upstreams: tuple[Stream, ...], label: str | None = None + ) -> None: + """ + Record a pod invocation in the computational graph. + + This method is called whenever a pod is invoked. The tracker + should record: + - The pod and its properties + - The upstream streams that were used as input + - Timing and performance information + - Any relevant metadata + + Args: + pod: The pod that was invoked + upstreams: The input streams used for this invocation + """ + ... + + +class TrackerManager(Protocol): + """ + Manages multiple trackers and coordinates their activity. + + The TrackerManager provides a centralized way to: + - Register and manage multiple trackers + - Coordinate recording across all active trackers + - Provide a single interface for graph recording + - Enable dynamic tracker registration/deregistration + + This design allows for: + - Multiple concurrent tracking strategies + - Pluggable tracking implementations + - Easy testing and debugging (mock trackers) + - Performance optimization (selective tracking) + """ + + def get_active_trackers(self) -> list[Tracker]: + """ + Get all currently active trackers. + + Returns only trackers that are both registered and active, + providing the list of trackers that will receive recording events. + + Returns: + list[Tracker]: List of trackers that are currently recording + """ + ... + + def register_tracker(self, tracker: Tracker) -> None: + """ + Register a new tracker in the system. + + The tracker will be included in future recording operations + if it is active. Registration is separate from activation + to allow for dynamic control of tracking overhead. + + Args: + tracker: The tracker to register + """ + ... + + def deregister_tracker(self, tracker: Tracker) -> None: + """ + Remove a tracker from the system. + + The tracker will no longer receive recording notifications + even if it is still active. This is useful for: + - Cleaning up temporary trackers + - Removing failed or problematic trackers + - Dynamic tracker management + + Args: + tracker: The tracker to remove + """ + ... 
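The Tracker protocol above only fixes method shapes, so a conforming implementation can stay very small. The following is an illustrative, minimal sketch of an in-memory tracker; the `SimpleGraphTracker` name and its list-based storage are assumptions for demonstration, not part of orcapod:

```python
from typing import Any


class SimpleGraphTracker:
    """Illustrative in-memory tracker; records invocations as plain tuples."""

    def __init__(self) -> None:
        self._active = True
        self.invocations: list[tuple[str, Any, tuple, str | None]] = []

    def set_active(self, active: bool = True) -> None:
        self._active = active

    def is_active(self) -> bool:
        return self._active

    def record_kernel_invocation(self, kernel, upstreams, label=None) -> None:
        # Only record while active, mirroring the protocol's contract
        if self._active:
            self.invocations.append(("kernel", kernel, upstreams, label))

    def record_pod_invocation(self, pod, upstreams, label=None) -> None:
        if self._active:
            self.invocations.append(("pod", pod, upstreams, label))
```

A TrackerManager would simply fan each `record_*` call out to every tracker returned by `get_active_trackers()`.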
+
+    def record_kernel_invocation(
+        self, kernel: Kernel, upstreams: tuple[Stream, ...], label: str | None = None
+    ) -> None:
+        """
+        Record a kernel invocation in all active trackers.
+
+        This method broadcasts the kernel invocation to all currently
+        active and registered trackers. It provides a single point
+        of entry for recording events, simplifying kernel implementations.
+
+        Args:
+            kernel: The kernel that was invoked
+            upstreams: The input streams used for this invocation
+            label: Optional label for this invocation
+        """
+        ...
+
+    def record_pod_invocation(
+        self, pod: Pod, upstreams: tuple[Stream, ...], label: str | None = None
+    ) -> None:
+        """
+        Record a pod invocation in all active trackers.
+
+        This method broadcasts the pod invocation to all currently
+        active and registered trackers. It provides a single point
+        of entry for recording events, simplifying kernel implementations.
+
+        Args:
+            pod: The pod that was invoked
+            upstreams: The input streams used for this invocation
+            label: Optional label for this invocation
+        """
+        ...
+
+    def no_tracking(self) -> ContextManager[None]: ...
diff --git a/src/orcapod/protocols/hashing_protocols.py b/src/orcapod/protocols/hashing_protocols.py
new file mode 100644
index 0000000..16c96cd
--- /dev/null
+++ b/src/orcapod/protocols/hashing_protocols.py
@@ -0,0 +1,146 @@
+"""Hash strategy protocols for dependency injection."""
+
+from collections.abc import Callable
+from typing import Any, Protocol, runtime_checkable, TYPE_CHECKING
+import uuid
+
+from orcapod.types import TypeSpec, PathLike
+
+if TYPE_CHECKING:
+    import pyarrow as pa
+
+
+@runtime_checkable
+class ContentIdentifiable(Protocol):
+    """Protocol for objects that can provide an identity structure."""
+
+    def identity_structure(self) -> Any:
+        """
+        Return a structure that represents the identity of this object.
+
+        Returns:
+            Any: A structure representing this object's content.
+                Should be deterministic and include all identity-relevant data.
+                Return None to indicate no custom identity is available.
+        """
+        ...
+
+    def __eq__(self, other: object) -> bool:
+        """
+        Equality check that compares the identity structures of two objects.
+
+        Args:
+            other (object): The object to compare with.
+
+        Returns:
+            bool: True if the identity structures are equal, False otherwise.
+        """
+        ...
+
+    def __hash__(self) -> int:
+        """
+        Hash implementation that uses the identity structure if provided,
+        otherwise falls back to the default hash.
+
+        Returns:
+            int: A hash value based on either content or identity.
+        """
+        ...
+
+
+class ObjectHasher(Protocol):
+    """Protocol for general object hashing."""
+
+    # TODO: consider more explicitly stating types of objects accepted
+    def hash(self, obj: Any) -> bytes:
+        """
+        Hash an object to a byte representation.
+
+        Args:
+            obj (Any): The object to hash.
+
+        Returns:
+            bytes: The byte representation of the hash.
+        """
+        ...
+
+    def get_hasher_id(self) -> str:
+        """
+        Returns a unique identifier/name assigned to the hasher.
+        """
+        ...
+
+    def hash_to_hex(
+        self, obj: Any, char_count: int | None = None, prefix_hasher_id: bool = False
+    ) -> str: ...
+
+    def hash_to_int(self, obj: Any, hexdigits: int = 16) -> int:
+        """
+        Hash an object to an integer.
+
+        Args:
+            obj (Any): The object to hash.
+            hexdigits (int): Number of hexadecimal digits to use for the hash.
+
+        Returns:
+            int: The integer representation of the hash.
+        """
+        ...
+
+    def hash_to_uuid(
+        self, obj: Any, namespace: uuid.UUID = uuid.NAMESPACE_OID
+    ) -> uuid.UUID: ...
+
+
+class FileContentHasher(Protocol):
+    """Protocol for file-related hashing."""
+
+    def hash_file(self, file_path: PathLike) -> bytes: ...
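For illustration, a conforming `ObjectHasher` could be built from a canonical JSON serialization hashed with SHA-256. Everything below (the `JsonSha256Hasher` name, the serialization choice, and the hex/int/uuid derivations) is an assumed sketch of the protocol above, not orcapod's actual hasher:

```python
import hashlib
import json
import uuid
from typing import Any


class JsonSha256Hasher:
    """Illustrative ObjectHasher: canonical JSON serialization hashed with SHA-256."""

    def get_hasher_id(self) -> str:
        return "json-sha256"

    def hash(self, obj: Any) -> bytes:
        # sort_keys makes the serialization deterministic; default=str is a
        # crude fallback for objects that are not natively JSON-serializable
        payload = json.dumps(obj, sort_keys=True, default=str).encode("utf-8")
        return hashlib.sha256(payload).digest()

    def hash_to_hex(
        self, obj: Any, char_count: int | None = None, prefix_hasher_id: bool = False
    ) -> str:
        digest = self.hash(obj).hex()
        if char_count is not None:
            digest = digest[:char_count]
        return f"{self.get_hasher_id()}:{digest}" if prefix_hasher_id else digest

    def hash_to_int(self, obj: Any, hexdigits: int = 16) -> int:
        return int(self.hash(obj).hex()[:hexdigits], 16)

    def hash_to_uuid(
        self, obj: Any, namespace: uuid.UUID = uuid.NAMESPACE_OID
    ) -> uuid.UUID:
        return uuid.uuid5(namespace, self.hash(obj).hex())
```

Because the protocol only constrains signatures, a production hasher could just as well delegate to `identity_structure()` for `ContentIdentifiable` objects before serializing.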
+ + +class ArrowHasher(Protocol): + """Protocol for hashing arrow packets.""" + + def get_hasher_id(self) -> str: ... + + def hash_table(self, table: "pa.Table", prefix_hasher_id: bool = True) -> str: ... + + +class StringCacher(Protocol): + """Protocol for caching string key value pairs.""" + + def get_cached(self, cache_key: str) -> str | None: ... + def set_cached(self, cache_key: str, value: str) -> None: ... + def clear_cache(self) -> None: ... + + +class FunctionInfoExtractor(Protocol): + """Protocol for extracting function information.""" + + def extract_function_info( + self, + func: Callable[..., Any], + function_name: str | None = None, + input_typespec: TypeSpec | None = None, + output_typespec: TypeSpec | None = None, + ) -> dict[str, Any]: ... + + +class SemanticTypeHasher(Protocol): + """Abstract base class for semantic type-specific hashers.""" + + @property + def hasher_id(self) -> str: + """Unique identifier for this semantic type hasher.""" + ... + + def hash_column( + self, + column: "pa.Array", + ) -> "pa.Array": + """Hash a column with this semantic type and return the hash bytes.""" + ... + + def set_cacher(self, cacher: StringCacher) -> None: + """Add a string cacher for caching hash values.""" + ... diff --git a/src/orcapod/protocols/semantic_protocols.py b/src/orcapod/protocols/semantic_protocols.py new file mode 100644 index 0000000..5458cad --- /dev/null +++ b/src/orcapod/protocols/semantic_protocols.py @@ -0,0 +1,38 @@ +from typing import Protocol, Any + + +class TypeHandler(Protocol): + """Protocol for handling conversion between Python type and Arrow + data types used for storage. + + The handler itself IS the definition of a semantic type. The semantic type + name/identifier is provided by the registerer when registering the handler. + + TypeHandlers should clearly communicate what Python types they can handle, + and focus purely on conversion logic. + """ + + def python_type(self) -> type: + """Return the Python type(s) this handler can process. + + Returns: + Python type the handler supports + + Examples: + - PathHandler: return Path + - NumericHandler: return (int, float) + - CollectionHandler: return (list, tuple, set) + """ + ... + + def storage_type(self) -> type: + """Return the Arrow DataType instance for schema definition.""" + ... + + def python_to_storage(self, value: Any) -> Any: + """Convert Python value to Arrow-compatible storage representation.""" + ... + + def storage_to_python(self, value: Any) -> Any: + """Convert storage representation back to Python object.""" + ... diff --git a/src/orcapod/protocols/store_protocols.py b/src/orcapod/protocols/store_protocols.py new file mode 100644 index 0000000..4940033 --- /dev/null +++ b/src/orcapod/protocols/store_protocols.py @@ -0,0 +1,49 @@ +from typing import Protocol +from collections.abc import Collection +import pyarrow as pa + + +class ArrowDataStore(Protocol): + def add_record( + self, + record_path: tuple[str, ...], + record_id: str, + data: pa.Table, + ignore_duplicates: bool | None = None, + overwrite_existing: bool = False, + ) -> str | None: ... + + def add_records( + self, + record_path: tuple[str, ...], + records: pa.Table, + record_id_column: str | None = None, + ignore_duplicates: bool | None = None, + overwrite_existing: bool = False, + ) -> list[str]: ... + + def get_record_by_id( + self, + record_path: tuple[str, ...], + record_id: str, + record_id_column: str | None = None, + ) -> pa.Table | None: ... 
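As a rough illustration of the `StringCacher` and `TypeHandler` protocols introduced above, the sketch below pairs a plain dict-backed cache with a `Path`-to-string handler. The class names and the choice of `pa.large_string()` as the storage type are assumptions for demonstration, not library code:

```python
from pathlib import Path
from typing import Any

import pyarrow as pa


class DictStringCacher:
    """Illustrative StringCacher backed by a plain in-memory dict."""

    def __init__(self) -> None:
        self._cache: dict[str, str] = {}

    def get_cached(self, cache_key: str) -> str | None:
        return self._cache.get(cache_key)

    def set_cached(self, cache_key: str, value: str) -> None:
        self._cache[cache_key] = value

    def clear_cache(self) -> None:
        self._cache.clear()


class PathHandler:
    """Illustrative TypeHandler mapping pathlib.Path values to Arrow strings."""

    def python_type(self) -> type:
        return Path

    def storage_type(self) -> "pa.DataType":
        # Per the protocol docstring, return the Arrow DataType used for storage
        return pa.large_string()

    def python_to_storage(self, value: Any) -> Any:
        return str(value)

    def storage_to_python(self, value: Any) -> Any:
        return Path(value)
```

For example, `PathHandler().python_to_storage(Path("/tmp/a.txt"))` stores the string "/tmp/a.txt", and `storage_to_python` round-trips it back to a `Path`.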
+ + def get_all_records( + self, + record_path: tuple[str, ...], + record_id_column: str | None = None, + ) -> pa.Table | None: + """Retrieve all records for a given path as a stream.""" + ... + + def get_records_by_ids( + self, + record_path: tuple[str, ...], + record_ids: Collection[str], + record_id_column: str | None = None, + ) -> pa.Table: ... + + def flush(self) -> None: + """Flush any buffered writes to the underlying storage.""" + ... diff --git a/src/orcapod/store/__init__.py b/src/orcapod/store/__init__.py deleted file mode 100644 index f573c4d..0000000 --- a/src/orcapod/store/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -from .types import DataStore, ArrowDataStore -from .core import DirDataStore, NoOpDataStore -from .safe_dir_data_store import SafeDirDataStore - -__all__ = [ - "DataStore", - "ArrowDataStore", - "DirDataStore", - "SafeDirDataStore", - "NoOpDataStore", -] diff --git a/src/orcapod/store/file.py b/src/orcapod/store/file.py deleted file mode 100644 index 0de8aff..0000000 --- a/src/orcapod/store/file.py +++ /dev/null @@ -1,159 +0,0 @@ -import builtins -import contextlib -import inspect -import os -from pathlib import Path -from typing import Callable, Collection, Dict, Optional, Tuple, Union - -from orcapod.types import Packet, PathSet - - -@contextlib.contextmanager -def redirect_open( - mapping: Union[Dict[str, str], Callable[[str], Optional[str]]], -): - """ - Context manager to intercept file opening operations. - - Args: - mapping: Either a dictionary mapping original paths to their replacements, - or a function that takes a path string and returns a replacement path - (or None to indicate the file should not be opened). - - Raises: - FileNotFoundError: If using a dictionary and the path is not found in it. - """ - # Track all places that might store an open() function - places_to_patch = [] - - # 1. Standard builtins.open - original_builtin_open = builtins.open - places_to_patch.append((builtins, "open", original_builtin_open)) - - # 2. __builtins__ (could be different in some contexts, especially IPython) - if isinstance(__builtins__, dict) and "open" in __builtins__: - places_to_patch.append((__builtins__, "open", __builtins__["open"])) - - # 3. Current module's globals (for the calling namespace) - current_frame = inspect.currentframe() - if current_frame is not None: - caller_globals = current_frame.f_back.f_globals if current_frame.f_back else {} - if "open" in caller_globals: - places_to_patch.append((caller_globals, "open", caller_globals["open"])) - - # 4. 
Check for IPython user namespace - try: - import IPython - - ip = IPython.get_ipython() # type: ignore - if ip and "open" in ip.user_ns: - places_to_patch.append((ip.user_ns, "open", ip.user_ns["open"])) - except (ImportError, AttributeError): - pass - - def patched_open(file, *args, **kwargs): - # Convert PathLike objects to string if needed - if hasattr(file, "__fspath__"): - file_path = os.fspath(file) - else: - file_path = str(file) - - if isinstance(mapping, dict): - if file_path in mapping: - redirected_path = mapping[file_path] - print(f"Redirecting '{file_path}' to '{redirected_path}'") - return original_builtin_open(redirected_path, *args, **kwargs) - else: - raise FileNotFoundError( - f"Path '{file_path}' not found in redirection mapping" - ) - else: # mapping is a function - redirected_path = mapping(file_path) - if redirected_path is not None: - print(f"Redirecting '{file_path}' to '{redirected_path}'") - return original_builtin_open(redirected_path, *args, **kwargs) - else: - raise FileNotFoundError(f"Path '{file_path}' could not be redirected") - - # Apply the patch to all places - for obj, attr, _ in places_to_patch: - if isinstance(obj, dict): - obj[attr] = patched_open - else: - setattr(obj, attr, patched_open) - - try: - yield - finally: - # Restore all original functions - for obj, attr, original in places_to_patch: - if isinstance(obj, dict): - obj[attr] = original - else: - setattr(obj, attr, original) - - -def virtual_mount( - packet: Packet, -) -> Tuple[Packet, Dict[str, str], Dict[str, str]]: - """ - Visit all pathset within the packet, and convert them to alternative path - representation. By default, full path is mapped to the file name. If two or - more paths have the same file name, the second one is suffixed with "_1", the - third one with "_2", etc. This is useful for creating a virtual mount point - for a set of files, where the original paths are not important, but the file - names can be used to identify the files. - """ - forward_lut = {} # mapping from original path to new path - reverse_lut = {} # mapping from new path to original path - new_packet = {} - - for key, value in packet.items(): - new_packet[key] = convert_pathset(value, forward_lut, reverse_lut) - - return new_packet, forward_lut, reverse_lut - - -# TODO: re-assess the structure of PathSet and consider making it recursive -def convert_pathset(pathset: PathSet, forward_lut, reverse_lut) -> PathSet: - """ - Convert a pathset to a new pathset. forward_lut and reverse_lut are updated - with the new paths. The new paths are created by replacing the original paths - with the new paths in the forward_lut. The reverse_lut is updated with the - original paths. If name already exists, a suffix is added to the new name to avoid - collisions. - """ - if isinstance(pathset, (str, bytes)): - new_name = Path(pathset).name - if new_name in reverse_lut: - # if the name already exists, add a suffix - i = 1 - while f"{new_name}_{i}" in reverse_lut: - i += 1 - new_name = f"{new_name}_{i}" - forward_lut[pathset] = new_name - reverse_lut[new_name] = pathset - return new_name - elif isinstance(pathset, Collection): - return [convert_pathset(p, forward_lut, reverse_lut) for p in pathset] # type: ignore - else: - raise ValueError( - f"Unsupported pathset type: {type(pathset)}. Expected str, bytes, or Collection." 
- ) - - -class WrappedPath: - def __init__(self, path, name=None): - self.path = Path(path) - if name is None: - name = self.path.name - self.name = name - - def __fspath__(self) -> Union[str, bytes]: - return self.path.__fspath__() - - def __str__(self) -> str: - return self.name - - def __repr__(self) -> str: - return f"WrappedPath({self.path}): {self.name}" diff --git a/src/orcapod/stores/__init__.py b/src/orcapod/stores/__init__.py new file mode 100644 index 0000000..434e2f4 --- /dev/null +++ b/src/orcapod/stores/__init__.py @@ -0,0 +1,14 @@ +# from .legacy.types import DataStore, ArrowDataStore +# from .legacy.legacy_arrow_data_stores import MockArrowDataStore, SimpleParquetDataStore +# from .legacy.dict_data_stores import DirDataStore, NoOpDataStore +# from .legacy.safe_dir_data_store import SafeDirDataStore + +# __all__ = [ +# "DataStore", +# "ArrowDataStore", +# "DirDataStore", +# "SafeDirDataStore", +# "NoOpDataStore", +# "MockArrowDataStore", +# "SimpleParquetDataStore", +# ] diff --git a/src/orcapod/stores/delta_lake_stores.py b/src/orcapod/stores/delta_lake_stores.py new file mode 100644 index 0000000..213ea3e --- /dev/null +++ b/src/orcapod/stores/delta_lake_stores.py @@ -0,0 +1,816 @@ +import pyarrow as pa +import polars as pl +from pathlib import Path +from typing import Any +import logging +from deltalake import DeltaTable, write_deltalake +from deltalake.exceptions import TableNotFoundError +from collections import defaultdict +from orcapod.data import constants + + +# Module-level logger +logger = logging.getLogger(__name__) + + +class BasicDeltaTableArrowStore: + """ + A basic Delta Table-based Arrow data store with flexible hierarchical path support. + This store does NOT implement lazy loading or streaming capabilities, therefore + being "basic" in that sense. It is designed for simple use cases where data is written + in batches and read back as complete tables. It is worth noting that the Delta table + structure created by this store IS compatible with more advanced Delta Table-based + data stores (to be implemented) that will support lazy loading and streaming. + + Uses tuple-based source paths for robust parameter handling: + - ("source_name", "source_id") -> source_name/source_id/ + - ("org", "project", "dataset") -> org/project/dataset/ + - ("year", "month", "day", "experiment") -> year/month/day/experiment/ + """ + + RECORD_ID_COLUMN = f"{constants.META_PREFIX}record_id" + + def __init__( + self, + base_path: str | Path, + duplicate_entry_behavior: str = "error", + create_base_path: bool = True, + max_hierarchy_depth: int = 10, + batch_size: int = 100, + ): + """ + Initialize the BasicDeltaTableArrowStore. 
+ + Args: + base_path: Base directory path where Delta tables will be stored + duplicate_entry_behavior: How to handle duplicate record_ids: + - 'error': Raise ValueError when record_id already exists + - 'overwrite': Replace existing entry with new data + create_base_path: Whether to create the base path if it doesn't exist + max_hierarchy_depth: Maximum allowed depth for source paths (safety limit) + batch_size: Number of records to batch before writing to Delta table + """ + # Validate duplicate behavior + if duplicate_entry_behavior not in ["error", "overwrite"]: + raise ValueError("duplicate_entry_behavior must be 'error' or 'overwrite'") + + self.duplicate_entry_behavior = duplicate_entry_behavior + self.base_path = Path(base_path) + self.max_hierarchy_depth = max_hierarchy_depth + self.batch_size = batch_size + + if create_base_path: + self.base_path.mkdir(parents=True, exist_ok=True) + elif not self.base_path.exists(): + raise ValueError( + f"Base path {self.base_path} does not exist and create_base_path=False" + ) + + # Cache for Delta tables to avoid repeated initialization + self._delta_table_cache: dict[str, DeltaTable] = {} + + # Batch management + self._pending_batches: dict[str, dict[str, pa.Table]] = defaultdict(dict) + + logger.info( + f"Initialized DeltaTableArrowDataStore at {self.base_path} " + f"with duplicate_entry_behavior='{duplicate_entry_behavior}', " + f"batch_size={batch_size}, as" + ) + + def flush(self) -> None: + """ + Flush all pending batches immediately. + + This method is called to ensure all pending data is written to the Delta tables. + """ + try: + self.flush_all_batches() + except Exception as e: + logger.error(f"Error during flush: {e}") + + def flush_batch(self, record_path: tuple[str, ...]) -> None: + """ + Flush pending batch for a specific source path. 
+ + Args: + record_path: Tuple of path components + """ + logger.debug("Flushing triggered!!") + source_key = self._get_source_key(record_path) + + if ( + source_key not in self._pending_batches + or not self._pending_batches[source_key] + ): + return + + # Get all pending records + pending_tables = self._pending_batches[source_key] + self._pending_batches[source_key] = {} + + try: + # Combine all tables in the batch + combined_table = pa.concat_tables(pending_tables.values()).combine_chunks() + + table_path = self._get_table_path(record_path) + table_path.mkdir(parents=True, exist_ok=True) + + # Check if table exists + delta_table = self._get_existing_delta_table(record_path) + + if delta_table is None: + # TODO: reconsider mode="overwrite" here + write_deltalake( + table_path, + combined_table, + mode="overwrite", + ) + logger.debug( + f"Created new Delta table for {source_key} with {len(combined_table)} records" + ) + else: + if self.duplicate_entry_behavior == "overwrite": + # Get entry IDs from the batch + record_ids = combined_table.column( + self.RECORD_ID_COLUMN + ).to_pylist() + unique_record_ids = list(set(record_ids)) + + # Delete existing records with these IDs + if unique_record_ids: + record_ids_str = "', '".join(unique_record_ids) + delete_predicate = ( + f"{self.RECORD_ID_COLUMN} IN ('{record_ids_str}')" + ) + try: + delta_table.delete(delete_predicate) + logger.debug( + f"Deleted {len(unique_record_ids)} existing records from {source_key}" + ) + except Exception as e: + logger.debug( + f"No existing records to delete from {source_key}: {e}" + ) + + # otherwise, only insert if same record_id does not exist yet + delta_table.merge( + source=combined_table, + predicate=f"target.{self.RECORD_ID_COLUMN} = source.{self.RECORD_ID_COLUMN}", + source_alias="source", + target_alias="target", + ).when_not_matched_insert_all().execute() + + logger.debug( + f"Appended batch of {len(combined_table)} records to {source_key}" + ) + + # Update cache + self._delta_table_cache[source_key] = DeltaTable(str(table_path)) + + except Exception as e: + logger.error(f"Error flushing batch for {source_key}: {e}") + # Put the tables back in the pending queue + self._pending_batches[source_key] = pending_tables + raise + + def flush_all_batches(self) -> None: + """Flush all pending batches.""" + source_keys = list(self._pending_batches.keys()) + + # TODO: capture and re-raise exceptions at the end + for source_key in source_keys: + record_path = tuple(source_key.split("/")) + try: + self.flush_batch(record_path) + except Exception as e: + logger.error(f"Error flushing batch for {source_key}: {e}") + + def __del__(self): + """Cleanup when object is destroyed.""" + self.flush() + + def _validate_record_path(self, record_path: tuple[str, ...]) -> None: + # TODO: consider removing this as path creation can be tried directly + """ + Validate source path components. 
+ + Args: + record_path: Tuple of path components + + Raises: + ValueError: If path is invalid + """ + if not record_path: + raise ValueError("Source path cannot be empty") + + if len(record_path) > self.max_hierarchy_depth: + raise ValueError( + f"Source path depth {len(record_path)} exceeds maximum {self.max_hierarchy_depth}" + ) + + # Validate path components + for i, component in enumerate(record_path): + if not component or not isinstance(component, str): + raise ValueError( + f"Source path component {i} is invalid: {repr(component)}" + ) + + # Check for filesystem-unsafe characters + unsafe_chars = ["/", "\\", ":", "*", "?", '"', "<", ">", "|", "\0"] + if any(char in component for char in unsafe_chars): + raise ValueError( + f"Source path {record_path} component {component} contains invalid characters: {repr(component)}" + ) + + def _get_source_key(self, record_path: tuple[str, ...]) -> str: + """Generate cache key for source storage.""" + return "/".join(record_path) + + def _get_table_path(self, record_path: tuple[str, ...]) -> Path: + """Get the filesystem path for a given source path.""" + path = self.base_path + for subpath in record_path: + path = path / subpath + return path + + def _get_existing_delta_table( + self, record_path: tuple[str, ...] + ) -> DeltaTable | None: + """ + Get or create a Delta table, handling schema initialization properly. + + Args: + record_path: Tuple of path components + + Returns: + DeltaTable instance or None if table doesn't exist + """ + source_key = self._get_source_key(record_path) + table_path = self._get_table_path(record_path) + + # Check cache first + if dt := self._delta_table_cache.get(source_key): + return dt + + try: + # Try to load existing table + delta_table = DeltaTable(str(table_path)) + self._delta_table_cache[source_key] = delta_table + logger.debug(f"Loaded existing Delta table for {source_key}") + return delta_table + except TableNotFoundError: + # Table doesn't exist + return None + except Exception as e: + logger.error(f"Error loading Delta table for {source_key}: {e}") + # Try to clear any corrupted cache and retry once + if source_key in self._delta_table_cache: + del self._delta_table_cache[source_key] + return None + + def _ensure_record_id_column( + self, arrow_data: pa.Table, record_id: str + ) -> pa.Table: + """Ensure the table has an record id column.""" + if self.RECORD_ID_COLUMN not in arrow_data.column_names: + # Add record_id column at the beginning + key_array = pa.array([record_id] * len(arrow_data), type=pa.large_string()) + arrow_data = arrow_data.add_column(0, self.RECORD_ID_COLUMN, key_array) + return arrow_data + + def _remove_record_id_column(self, arrow_data: pa.Table) -> pa.Table: + """Remove the record id column if it exists.""" + if self.RECORD_ID_COLUMN in arrow_data.column_names: + column_names = arrow_data.column_names + indices_to_keep = [ + i + for i, name in enumerate(column_names) + if name != self.RECORD_ID_COLUMN + ] + arrow_data = arrow_data.select(indices_to_keep) + return arrow_data + + def _handle_record_id_column( + self, arrow_data: pa.Table, record_id_column: str | None = None + ) -> pa.Table: + """ + Handle record_id column based on add_record_id_column parameter. 
+ + Args: + arrow_data: Arrow table with record id column + record_id_column: Control entry ID column inclusion: + + """ + if not record_id_column: + # Remove the record id column + return self._remove_record_id_column(arrow_data) + + # Rename record id column + if self.RECORD_ID_COLUMN in arrow_data.column_names: + schema = arrow_data.schema + new_names = [ + record_id_column if name == self.RECORD_ID_COLUMN else name + for name in schema.names + ] + return arrow_data.rename_columns(new_names) + else: + raise ValueError( + f"Record ID column '{self.RECORD_ID_COLUMN}' not found in the table and cannot be renamed." + ) + + def _create_record_id_filter(self, record_id: str) -> list: + """ + Create a proper filter expression for Delta Lake. + + Args: + record_id: The entry ID to filter by + + Returns: + List containing the filter expression for Delta Lake + """ + return [(self.RECORD_ID_COLUMN, "=", record_id)] + + def _create_record_ids_filter(self, record_ids: list[str]) -> list: + """ + Create a proper filter expression for multiple entry IDs. + + Args: + record_ids: List of entry IDs to filter by + + Returns: + List containing the filter expression for Delta Lake + """ + return [(self.RECORD_ID_COLUMN, "in", record_ids)] + + def _read_table_with_filter( + self, + delta_table: DeltaTable, + filters: list | None = None, + ) -> pa.Table: + """ + Read table using to_pyarrow_dataset with original schema preservation. + + Args: + delta_table: The Delta table to read from + filters: Optional filters to apply + + Returns: + Arrow table with preserved schema + """ + # Use to_pyarrow_dataset with as_large_types for Polars compatible arrow table loading + dataset = delta_table.to_pyarrow_dataset(as_large_types=True) + if filters: + # Apply filters at dataset level for better performance + import pyarrow.compute as pc + + filter_expr = None + for filt in filters: + if len(filt) == 3: + col, op, val = filt + if op == "=": + expr = pc.equal(pc.field(col), pa.scalar(val)) # type: ignore + elif op == "in": + expr = pc.is_in(pc.field(col), pa.array(val)) # type: ignore + else: + logger.warning( + f"Unsupported filter operation: {op}. Falling back to table-level filter application which may be less efficient." + ) + # Fallback to table-level filtering + return dataset.to_table()(filters=filters) + + if filter_expr is None: + filter_expr = expr + else: + filter_expr = pc.and_(filter_expr, expr) # type: ignore + + if filter_expr is not None: + return dataset.to_table(filter=filter_expr) + + return dataset.to_table() + + def add_record( + self, + record_path: tuple[str, ...], + record_id: str, + data: pa.Table, + ignore_duplicates: bool | None = None, + overwrite_existing: bool = False, + force_flush: bool = False, + ) -> pa.Table: + self._validate_record_path(record_path) + source_key = self._get_source_key(record_path) + + # Check for existing entry + if ignore_duplicates is None: + ignore_duplicates = self.duplicate_entry_behavior != "error" + if not ignore_duplicates: + pending_table = self._pending_batches[source_key].get(record_id, None) + if pending_table is not None: + raise ValueError( + f"Entry '{record_id}' already exists in pending batch for {source_key}. " + f"Use duplicate_entry_behavior='overwrite' to allow updates." + ) + existing_record = self.get_record_by_id(record_path, record_id, flush=False) + if existing_record is not None: + raise ValueError( + f"Entry '{record_id}' already exists in {'/'.join(record_path)}. " + f"Use duplicate_entry_behavior='overwrite' to allow updates." 
+ ) + + # Add record_id column to the data + data_with_record_id = self._ensure_record_id_column(data, record_id) + + if force_flush: + # Write immediately + table_path = self._get_table_path(record_path) + table_path.mkdir(parents=True, exist_ok=True) + + delta_table = self._get_existing_delta_table(record_path) + + if delta_table is None: + # Create new table - save original schema first + write_deltalake(str(table_path), data_with_record_id, mode="overwrite") + logger.debug(f"Created new Delta table for {source_key}") + else: + if self.duplicate_entry_behavior == "overwrite": + try: + delta_table.delete( + f"{self.RECORD_ID_COLUMN} = '{record_id.replace(chr(39), chr(39) + chr(39))}'" + ) + logger.debug( + f"Deleted existing record {record_id} from {source_key}" + ) + except Exception as e: + logger.debug( + f"No existing record to delete for {record_id}: {e}" + ) + + write_deltalake( + table_path, + data_with_record_id, + mode="append", + schema_mode="merge", + ) + + # Update cache + self._delta_table_cache[source_key] = DeltaTable(str(table_path)) + else: + # Add to the batch for later flushing + self._pending_batches[source_key][record_id] = data_with_record_id + batch_size = len(self._pending_batches[source_key]) + + # Check if we need to flush + if batch_size >= self.batch_size: + self.flush_batch(record_path) + + logger.debug(f"Added record {record_id} to {source_key}") + return data + + def add_records( + self, + record_path: tuple[str, ...], + records: pa.Table, + record_id_column: str | None = None, + ignore_duplicates: bool | None = None, + overwrite_existing: bool = False, + ) -> list[str]: + raise NotImplementedError( + "add_records is not implemented in BasicDeltaTableArrowStore yet. " + "Use add_record for single record insertion." + ) + + def get_record_by_id( + self, + record_path: tuple[str, ...], + record_id: str, + record_id_column: str | None = None, + flush: bool = False, + ) -> pa.Table | None: + """ + Get a specific record by record_id with schema preservation. + + Args: + record_path: Tuple of path components + record_id: Unique identifier for the record + + Returns: + Arrow table for the record or None if not found + """ + + if flush: + self.flush_batch(record_path) + self._validate_record_path(record_path) + + # check if record_id is found in pending batches + source_key = self._get_source_key(record_path) + if record_id in self._pending_batches[source_key]: + # Return the pending record after removing the entry id column + return self._remove_record_id_column( + self._pending_batches[source_key][record_id] + ) + + delta_table = self._get_existing_delta_table(record_path) + if delta_table is None: + return None + + try: + # Use schema-preserving read + filter_expr = self._create_record_id_filter(record_id) + result = self._read_table_with_filter(delta_table, filters=filter_expr) + + if len(result) == 0: + return None + + # Handle (remove/rename) the record id column before returning + return self._handle_record_id_column(result, record_id_column) + + except Exception as e: + logger.error( + f"Error getting record {record_id} from {'/'.join(record_path)}: {e}" + ) + raise e + + def get_all_records( + self, + record_path: tuple[str, ...], + record_id_column: str | None = None, + retrieve_pending: bool = True, + flush: bool = False, + ) -> pa.Table | None: + """ + Retrieve all records for a given source path as a single table with schema preservation. 
+ + Args: + record_path: Tuple of path components + record_id_column: If not None or empty, record id is returned in the result with the specified column name + + Returns: + Arrow table containing all records with original schema, or None if no records found + """ + # TODO: this currently reads everything into memory and then return. Consider implementation that performs everything lazily + + if flush: + self.flush_batch(record_path) + self._validate_record_path(record_path) + + collected_tables = [] + if retrieve_pending: + # Check if there are pending records in the batch + for record_id, arrow_table in self._pending_batches[ + self._get_source_key(record_path) + ].items(): + collected_tables.append( + self._ensure_record_id_column(arrow_table, record_id) + ) + + delta_table = self._get_existing_delta_table(record_path) + if delta_table is not None: + try: + # Use filter-based read + result = self._read_table_with_filter(delta_table) + + if len(result) != 0: + collected_tables.append(result) + + except Exception as e: + logger.error( + f"Error getting all records from {'/'.join(record_path)}: {e}" + ) + if collected_tables: + total_table = pa.concat_tables(collected_tables) + + # Handle record_id column based on parameter + return self._handle_record_id_column(total_table, record_id_column) + + return None + + def get_records_by_ids( + self, + record_path: tuple[str, ...], + record_ids: list[str] | pl.Series | pa.Array, + record_id_column: str | None = None, + flush: bool = False, + ) -> pa.Table | None: + """ + Retrieve records by entry IDs as a single table with schema preservation. + + Args: + record_path: Tuple of path components + record_ids: Entry IDs to retrieve + add_record_id_column: Control entry ID column inclusion + preserve_input_order: If True, return results in input order with nulls for missing + + Returns: + Arrow table containing all found records with original schema, or None if no records found + """ + + if flush: + self.flush_batch(record_path) + + self._validate_record_path(record_path) + + # Convert input to list of strings for consistency + if isinstance(record_ids, list): + if not record_ids: + return None + record_ids_list = record_ids + elif isinstance(record_ids, pl.Series): + if len(record_ids) == 0: + return None + record_ids_list = record_ids.to_list() + elif isinstance(record_ids, (pa.Array, pa.ChunkedArray)): + if len(record_ids) == 0: + return None + record_ids_list = record_ids.to_pylist() + else: + raise TypeError( + f"record_ids must be list[str], pl.Series, or pa.Array, got {type(record_ids)}" + ) + + delta_table = self._get_existing_delta_table(record_path) + if delta_table is None: + return None + + try: + # Use schema-preserving read with filters + filter_expr = self._create_record_ids_filter(record_ids_list) + result = self._read_table_with_filter(delta_table, filters=filter_expr) + + if len(result) == 0: + return None + + # Handle record_id column based on parameter + return self._handle_record_id_column(result, record_id_column) + + except Exception as e: + logger.error( + f"Error getting records by IDs from {'/'.join(record_path)}: {e}" + ) + return None + + def get_pending_batch_info(self) -> dict[str, int]: + """ + Get information about pending batches. + + Returns: + Dictionary mapping source keys to number of pending records + """ + return { + source_key: len(tables) + for source_key, tables in self._pending_batches.items() + if tables + } + + def list_sources(self) -> list[tuple[str, ...]]: + """ + List all available source paths. 
+ + Returns: + List of source path tuples + """ + sources = [] + + def _scan_directory(current_path: Path, path_components: tuple[str, ...]): + """Recursively scan for Delta tables.""" + for item in current_path.iterdir(): + if not item.is_dir(): + continue + + new_path_components = path_components + (item.name,) + + # Check if this directory contains a Delta table + try: + DeltaTable(str(item)) + sources.append(new_path_components) + except TableNotFoundError: + # Not a Delta table, continue scanning subdirectories + if len(new_path_components) < self.max_hierarchy_depth: + _scan_directory(item, new_path_components) + + _scan_directory(self.base_path, ()) + return sources + + def delete_source(self, record_path: tuple[str, ...]) -> bool: + """ + Delete an entire source (all records for a source path). + + Args: + record_path: Tuple of path components + + Returns: + True if source was deleted, False if it didn't exist + """ + self._validate_record_path(record_path) + + # Flush any pending batches first + self.flush_batch(record_path) + + table_path = self._get_table_path(record_path) + source_key = self._get_source_key(record_path) + + if not table_path.exists(): + return False + + try: + # Remove from caches + if source_key in self._delta_table_cache: + del self._delta_table_cache[source_key] + + # Remove directory + import shutil + + shutil.rmtree(table_path) + + logger.info(f"Deleted source {source_key}") + return True + + except Exception as e: + logger.error(f"Error deleting source {source_key}: {e}") + return False + + def delete_record(self, record_path: tuple[str, ...], record_id: str) -> bool: + """ + Delete a specific record. + + Args: + record_path: Tuple of path components + record_id: ID of the record to delete + + Returns: + True if record was deleted, False if it didn't exist + """ + self._validate_record_path(record_path) + + # Flush any pending batches first + self.flush_batch(record_path) + + delta_table = self._get_existing_delta_table(record_path) + if delta_table is None: + return False + + try: + # Check if record exists using proper filter + filter_expr = self._create_record_id_filter(record_id) + existing = self._read_table_with_filter(delta_table, filters=filter_expr) + if len(existing) == 0: + return False + + # Delete the record using SQL-style predicate (this is correct for delete operations) + delta_table.delete( + f"{self.RECORD_ID_COLUMN} = '{record_id.replace(chr(39), chr(39) + chr(39))}'" + ) + + # Update cache + source_key = self._get_source_key(record_path) + self._delta_table_cache[source_key] = delta_table + + logger.debug(f"Deleted record {record_id} from {'/'.join(record_path)}") + return True + + except Exception as e: + logger.error( + f"Error deleting record {record_id} from {'/'.join(record_path)}: {e}" + ) + return False + + def get_table_info(self, record_path: tuple[str, ...]) -> dict[str, Any] | None: + """ + Get metadata information about a Delta table. 
+ + Args: + record_path: Tuple of path components + + Returns: + Dictionary with table metadata, or None if table doesn't exist + """ + self._validate_record_path(record_path) + + delta_table = self._get_existing_delta_table(record_path) + if delta_table is None: + return None + + try: + # Get basic info + schema = delta_table.schema() + history = delta_table.history() + source_key = self._get_source_key(record_path) + + # Add pending batch info + pending_info = self.get_pending_batch_info() + pending_count = pending_info.get(source_key, 0) + + return { + "path": str(self._get_table_path(record_path)), + "record_path": record_path, + "schema": schema, + "version": delta_table.version(), + "num_files": len(delta_table.files()), + "history_length": len(history), + "latest_commit": history[0] if history else None, + "pending_records": pending_count, + } + + except Exception as e: + logger.error(f"Error getting table info for {'/'.join(record_path)}: {e}") + return None diff --git a/src/orcapod/store/file_ops.py b/src/orcapod/stores/file_utils.py similarity index 58% rename from src/orcapod/store/file_ops.py rename to src/orcapod/stores/file_utils.py index 0e34213..712aada 100644 --- a/src/orcapod/store/file_ops.py +++ b/src/orcapod/stores/file_utils.py @@ -1,10 +1,15 @@ # file_ops.py - Atomic file operations module +import builtins +import contextlib +import inspect import logging import os from pathlib import Path -from orcapod.types import PathLike +from orcapod.types import PathLike, PathSet, PacketLike +from collections.abc import Collection, Callable + logger = logging.getLogger(__name__) @@ -276,3 +281,154 @@ def is_file_locked(file_path: PathLike) -> bool: except Exception: # Any other exception - assume not locked return False + + +@contextlib.contextmanager +def redirect_open( + mapping: dict[str, str] | Callable[[str], str | None], +): + """ + Context manager to intercept file opening operations. + + Args: + mapping: Either a dictionary mapping original paths to their replacements, + or a function that takes a path string and returns a replacement path + (or None to indicate the file should not be opened). + + Raises: + FileNotFoundError: If using a dictionary and the path is not found in it. + """ + # Track all places that might store an open() function + places_to_patch = [] + + # 1. Standard builtins.open + original_builtin_open = builtins.open + places_to_patch.append((builtins, "open", original_builtin_open)) + + # 2. __builtins__ (could be different in some contexts, especially IPython) + if isinstance(__builtins__, dict) and "open" in __builtins__: + places_to_patch.append((__builtins__, "open", __builtins__["open"])) + + # 3. Current module's globals (for the calling namespace) + current_frame = inspect.currentframe() + if current_frame is not None: + caller_globals = current_frame.f_back.f_globals if current_frame.f_back else {} + if "open" in caller_globals: + places_to_patch.append((caller_globals, "open", caller_globals["open"])) + + # 4. 
Check for IPython user namespace + try: + import IPython + + ip = IPython.get_ipython() # type: ignore + if ip and "open" in ip.user_ns: + places_to_patch.append((ip.user_ns, "open", ip.user_ns["open"])) + except (ImportError, AttributeError): + pass + + def patched_open(file, *args, **kwargs): + # Convert PathLike objects to string if needed + if hasattr(file, "__fspath__"): + file_path = os.fspath(file) + else: + file_path = str(file) + + if isinstance(mapping, dict): + if file_path in mapping: + redirected_path = mapping[file_path] + print(f"Redirecting '{file_path}' to '{redirected_path}'") + return original_builtin_open(redirected_path, *args, **kwargs) + else: + raise FileNotFoundError( + f"Path '{file_path}' not found in redirection mapping" + ) + else: # mapping is a function + redirected_path = mapping(file_path) + if redirected_path is not None: + print(f"Redirecting '{file_path}' to '{redirected_path}'") + return original_builtin_open(redirected_path, *args, **kwargs) + else: + raise FileNotFoundError(f"Path '{file_path}' could not be redirected") + + # Apply the patch to all places + for obj, attr, _ in places_to_patch: + if isinstance(obj, dict): + obj[attr] = patched_open + else: + setattr(obj, attr, patched_open) + + try: + yield + finally: + # Restore all original functions + for obj, attr, original in places_to_patch: + if isinstance(obj, dict): + obj[attr] = original + else: + setattr(obj, attr, original) + + +def virtual_mount( + packet: PacketLike, +) -> tuple[PacketLike, dict[str, str], dict[str, str]]: + """ + Visit all pathset within the packet, and convert them to alternative path + representation. By default, full path is mapped to the file name. If two or + more paths have the same file name, the second one is suffixed with "_1", the + third one with "_2", etc. This is useful for creating a virtual mount point + for a set of files, where the original paths are not important, but the file + names can be used to identify the files. + """ + forward_lut = {} # mapping from original path to new path + reverse_lut = {} # mapping from new path to original path + new_packet = {} + + for key, value in packet.items(): + new_packet[key] = convert_pathset(value, forward_lut, reverse_lut) # type: ignore + + return new_packet, forward_lut, reverse_lut + + +# TODO: re-assess the structure of PathSet and consider making it recursive +def convert_pathset(pathset: PathSet, forward_lut, reverse_lut) -> PathSet: + """ + Convert a pathset to a new pathset. forward_lut and reverse_lut are updated + with the new paths. The new paths are created by replacing the original paths + with the new paths in the forward_lut. The reverse_lut is updated with the + original paths. If name already exists, a suffix is added to the new name to avoid + collisions. + """ + if isinstance(pathset, (str, bytes)): + new_name = Path(pathset).name + if new_name in reverse_lut: + # if the name already exists, add a suffix + i = 1 + while f"{new_name}_{i}" in reverse_lut: + i += 1 + new_name = f"{new_name}_{i}" + forward_lut[pathset] = new_name + reverse_lut[new_name] = pathset + return new_name + elif isinstance(pathset, Collection): + return [convert_pathset(p, forward_lut, reverse_lut) for p in pathset] # type: ignore + else: + raise ValueError( + f"Unsupported pathset type: {type(pathset)}. Expected str, bytes, or Collection." 
+ ) + + +class WrappedPath: + def __init__(self, path, name=None): + self.path = Path(path) + if name is None: + name = self.path.name + self.name = name + + def __fspath__(self) -> str | bytes: + return self.path.__fspath__() + + def __str__(self) -> str: + return self.name + + def __repr__(self) -> str: + return f"WrappedPath({self.path}): {self.name}" diff --git a/src/orcapod/stores/legacy/delta_table_arrow_data_store.py b/src/orcapod/stores/legacy/delta_table_arrow_data_store.py new file mode 100644 index 0000000..56bbbfa --- /dev/null +++ b/src/orcapod/stores/legacy/delta_table_arrow_data_store.py @@ -0,0 +1,864 @@ +import pyarrow as pa +import pyarrow.compute as pc +import pyarrow.dataset as ds +import polars as pl +from pathlib import Path +from typing import Any +import logging +from deltalake import DeltaTable, write_deltalake +from deltalake.exceptions import TableNotFoundError +from collections import defaultdict + + +# Module-level logger +logger = logging.getLogger(__name__) + + +class DeltaTableArrowDataStore: + """ + Delta Table-based Arrow data store with flexible hierarchical path support and schema preservation. + + Uses tuple-based source paths for robust parameter handling: + - ("source_name", "source_id") -> source_name/source_id/ + - ("org", "project", "dataset") -> org/project/dataset/ + - ("year", "month", "day", "experiment") -> year/month/day/experiment/ + """ + + def __init__( + self, + base_path: str | Path, + duplicate_entry_behavior: str = "error", + create_base_path: bool = True, + max_hierarchy_depth: int = 10, + batch_size: int = 100, + ): + """ + Initialize the DeltaTableArrowDataStore. + + Args: + base_path: Base directory path where Delta tables will be stored + duplicate_entry_behavior: How to handle duplicate entry_ids: + - 'error': Raise ValueError when entry_id already exists + - 'overwrite': Replace existing entry with new data + create_base_path: Whether to create the base path if it doesn't exist + max_hierarchy_depth: Maximum allowed depth for source paths (safety limit) + batch_size: Number of records to batch before writing to Delta table + auto_flush_interval: Time in seconds to auto-flush pending batches (0 to disable) + """ + # Validate duplicate behavior + if duplicate_entry_behavior not in ["error", "overwrite"]: + raise ValueError("duplicate_entry_behavior must be 'error' or 'overwrite'") + + self.duplicate_entry_behavior = duplicate_entry_behavior + self.base_path = Path(base_path) + self.max_hierarchy_depth = max_hierarchy_depth + self.batch_size = batch_size + + if create_base_path: + self.base_path.mkdir(parents=True, exist_ok=True) + elif not self.base_path.exists(): + raise ValueError( + f"Base path {self.base_path} does not exist and create_base_path=False" + ) + + # Cache for Delta tables to avoid repeated initialization + self._delta_table_cache: dict[str, DeltaTable] = {} + + # Batch management + self._pending_batches: dict[str, dict[str, pa.Table]] = defaultdict(dict) + + logger.info( + f"Initialized DeltaTableArrowDataStore at {self.base_path} " + f"with duplicate_entry_behavior='{duplicate_entry_behavior}', " + f"batch_size={batch_size}, as" + ) + + def flush(self) -> None: + """ + Flush all pending batches immediately. + + This method is called to ensure all pending data is written to the Delta tables. 
+ """ + try: + self.flush_all_batches() + except Exception as e: + logger.error(f"Error during flush: {e}") + + def flush_batch(self, source_path: tuple[str, ...]) -> None: + """ + Flush pending batch for a specific source path. + + Args: + source_path: Tuple of path components + """ + logger.debug("Flushing triggered!!") + source_key = self._get_source_key(source_path) + + if ( + source_key not in self._pending_batches + or not self._pending_batches[source_key] + ): + return + + # Get all pending records + pending_tables = self._pending_batches[source_key] + self._pending_batches[source_key] = {} + + try: + # Combine all tables in the batch + combined_table = pa.concat_tables(pending_tables.values()).combine_chunks() + + table_path = self._get_table_path(source_path) + table_path.mkdir(parents=True, exist_ok=True) + + # Check if table exists + delta_table = self._get_existing_delta_table(source_path) + + if delta_table is None: + # TODO: reconsider mode="overwrite" here + write_deltalake( + table_path, + combined_table, + mode="overwrite", + ) + logger.debug( + f"Created new Delta table for {source_key} with {len(combined_table)} records" + ) + else: + if self.duplicate_entry_behavior == "overwrite": + # Get entry IDs from the batch + entry_ids = combined_table.column("__entry_id").to_pylist() + unique_entry_ids = list(set(entry_ids)) + + # Delete existing records with these IDs + if unique_entry_ids: + entry_ids_str = "', '".join(unique_entry_ids) + delete_predicate = f"__entry_id IN ('{entry_ids_str}')" + try: + delta_table.delete(delete_predicate) + logger.debug( + f"Deleted {len(unique_entry_ids)} existing records from {source_key}" + ) + except Exception as e: + logger.debug( + f"No existing records to delete from {source_key}: {e}" + ) + + # otherwise, only insert if same entry_id does not exist yet + delta_table.merge( + source=combined_table, + predicate="target.__entry_id = source.__entry_id", + source_alias="source", + target_alias="target", + ).when_not_matched_insert_all().execute() + + logger.debug( + f"Appended batch of {len(combined_table)} records to {source_key}" + ) + + # Update cache + self._delta_table_cache[source_key] = DeltaTable(str(table_path)) + + except Exception as e: + logger.error(f"Error flushing batch for {source_key}: {e}") + # Put the tables back in the pending queue + self._pending_batches[source_key] = pending_tables + raise + + def flush_all_batches(self) -> None: + """Flush all pending batches.""" + source_keys = list(self._pending_batches.keys()) + + # TODO: capture and re-raise exceptions at the end + for source_key in source_keys: + source_path = tuple(source_key.split("/")) + try: + self.flush_batch(source_path) + except Exception as e: + logger.error(f"Error flushing batch for {source_key}: {e}") + + def __del__(self): + """Cleanup when object is destroyed.""" + self.flush() + + def _validate_source_path(self, source_path: tuple[str, ...]) -> None: + # TODO: consider removing this as path creation can be tried directly + """ + Validate source path components. 
+ + Args: + source_path: Tuple of path components + + Raises: + ValueError: If path is invalid + """ + if not source_path: + raise ValueError("Source path cannot be empty") + + if len(source_path) > self.max_hierarchy_depth: + raise ValueError( + f"Source path depth {len(source_path)} exceeds maximum {self.max_hierarchy_depth}" + ) + + # Validate path components + for i, component in enumerate(source_path): + if not component or not isinstance(component, str): + raise ValueError( + f"Source path component {i} is invalid: {repr(component)}" + ) + + # Check for filesystem-unsafe characters + unsafe_chars = ["/", "\\", ":", "*", "?", '"', "<", ">", "|", "\0"] + if any(char in component for char in unsafe_chars): + raise ValueError( + f"Source path component contains invalid characters: {repr(component)}" + ) + + def _get_source_key(self, source_path: tuple[str, ...]) -> str: + """Generate cache key for source storage.""" + return "/".join(source_path) + + def _get_table_path(self, source_path: tuple[str, ...]) -> Path: + """Get the filesystem path for a given source path.""" + path = self.base_path + for subpath in source_path: + path = path / subpath + return path + + def _get_existing_delta_table( + self, source_path: tuple[str, ...] + ) -> DeltaTable | None: + """ + Get or create a Delta table, handling schema initialization properly. + + Args: + source_path: Tuple of path components + + Returns: + DeltaTable instance or None if table doesn't exist + """ + source_key = self._get_source_key(source_path) + table_path = self._get_table_path(source_path) + + # Check cache first + if dt := self._delta_table_cache.get(source_key): + return dt + + try: + # Try to load existing table + delta_table = DeltaTable(str(table_path)) + self._delta_table_cache[source_key] = delta_table + logger.debug(f"Loaded existing Delta table for {source_key}") + return delta_table + except TableNotFoundError: + # Table doesn't exist + return None + except Exception as e: + logger.error(f"Error loading Delta table for {source_key}: {e}") + # Try to clear any corrupted cache and retry once + if source_key in self._delta_table_cache: + del self._delta_table_cache[source_key] + return None + + def _ensure_entry_id_column(self, arrow_data: pa.Table, entry_id: str) -> pa.Table: + """Ensure the table has an __entry_id column.""" + if "__entry_id" not in arrow_data.column_names: + # Add entry_id column at the beginning + key_array = pa.array([entry_id] * len(arrow_data), type=pa.large_string()) + arrow_data = arrow_data.add_column(0, "__entry_id", key_array) + return arrow_data + + def _remove_entry_id_column(self, arrow_data: pa.Table) -> pa.Table: + """Remove the __entry_id column if it exists.""" + if "__entry_id" in arrow_data.column_names: + column_names = arrow_data.column_names + indices_to_keep = [ + i for i, name in enumerate(column_names) if name != "__entry_id" + ] + arrow_data = arrow_data.select(indices_to_keep) + return arrow_data + + def _handle_entry_id_column( + self, arrow_data: pa.Table, add_entry_id_column: bool | str = False + ) -> pa.Table: + """ + Handle entry_id column based on add_entry_id_column parameter. 
+
+        Args:
+            arrow_data: Arrow table with __entry_id column
+            add_entry_id_column: Control entry ID column inclusion:
+                - False: Remove __entry_id column
+                - True: Keep __entry_id column as is
+                - str: Rename __entry_id column to custom name
+        """
+        if add_entry_id_column is False:
+            # Remove the __entry_id column
+            return self._remove_entry_id_column(arrow_data)
+        elif isinstance(add_entry_id_column, str):
+            # Rename __entry_id to custom name
+            if "__entry_id" in arrow_data.column_names:
+                schema = arrow_data.schema
+                new_names = [
+                    add_entry_id_column if name == "__entry_id" else name
+                    for name in schema.names
+                ]
+                return arrow_data.rename_columns(new_names)
+        # If add_entry_id_column is True, keep __entry_id as is
+        return arrow_data
+
+    def _create_entry_id_filter(self, entry_id: str) -> list:
+        """
+        Create a proper filter expression for Delta Lake.
+
+        Args:
+            entry_id: The entry ID to filter by
+
+        Returns:
+            List containing the filter expression for Delta Lake
+        """
+        return [("__entry_id", "=", entry_id)]
+
+    def _create_entry_ids_filter(self, entry_ids: list[str]) -> list:
+        """
+        Create a proper filter expression for multiple entry IDs.
+
+        Args:
+            entry_ids: List of entry IDs to filter by
+
+        Returns:
+            List containing the filter expression for Delta Lake
+        """
+        return [("__entry_id", "in", entry_ids)]
+
+    def _read_table_with_filter(
+        self,
+        delta_table: DeltaTable,
+        filters: list | None = None,
+    ) -> pa.Table:
+        """
+        Read table using to_pyarrow_dataset with original schema preservation.
+
+        Args:
+            delta_table: The Delta table to read from
+            filters: Optional filters to apply
+
+        Returns:
+            Arrow table with preserved schema
+        """
+        # Use to_pyarrow_dataset with as_large_types for Polars compatible arrow table loading
+        dataset: ds.Dataset = delta_table.to_pyarrow_dataset(as_large_types=True)
+        if filters:
+            # Apply filters at dataset level for better performance
+            import pyarrow.compute as pc
+
+            filter_expr = None
+            for filt in filters:
+                if len(filt) == 3:
+                    col, op, val = filt
+                    if op == "=":
+                        expr = pc.equal(pc.field(col), pa.scalar(val))  # type: ignore
+                    elif op == "in":
+                        expr = pc.is_in(pc.field(col), pa.array(val))  # type: ignore
+                    else:
+                        logger.warning(
+                            f"Unsupported filter operation: {op}. Falling back to table-level filter application which may be less efficient."
+                        )
+                        # Fallback to table-level filtering via the DeltaTable API,
+                        # which accepts DNF-style filter tuples directly
+                        return delta_table.to_pyarrow_table(filters=filters)
+
+                if filter_expr is None:
+                    filter_expr = expr
+                else:
+                    filter_expr = pc.and_(filter_expr, expr)  # type: ignore
+
+            if filter_expr is not None:
+                return dataset.to_table(filter=filter_expr)
+
+        return dataset.to_table()
+
+    def add_record(
+        self,
+        source_path: tuple[str, ...],
+        entry_id: str,
+        arrow_data: pa.Table,
+        force_flush: bool = False,
+    ) -> pa.Table:
+        """
+        Add a record to the Delta table (batched).
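
A minimal sketch of the batched write path, assuming the store class defined in this file is already instantiated as `store` (its constructor, including `batch_size`, is defined earlier in the file):

    import pyarrow as pa

    tbl = pa.table({"x": [1], "y": [2]})

    # Buffered in memory until batch_size pending entries accumulate for this source path
    store.add_record(("experiments", "run_001"), entry_id="e1", arrow_data=tbl)

    # Bypass batching and write straight to the Delta table
    store.add_record(("experiments", "run_001"), entry_id="e2", arrow_data=tbl, force_flush=True)

    # Explicitly persist anything still pending
    store.flush_all_batches()
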
+ + Args: + source_path: Tuple of path components (e.g., ("org", "project", "dataset")) + entry_id: Unique identifier for this record + arrow_data: The Arrow table data to store + ignore_duplicate: If True, ignore duplicate entry error + force_flush: If True, immediately flush this record to disk + + Returns: + The Arrow table data that was stored + + Raises: + ValueError: If entry_id already exists and duplicate_entry_behavior is 'error' + """ + self._validate_source_path(source_path) + source_key = self._get_source_key(source_path) + + # Check for existing entry + if self.duplicate_entry_behavior == "error": + # Only check existing table, not pending batch for performance + pending_table = self._pending_batches[source_key].get(entry_id, None) + if pending_table is not None: + raise ValueError( + f"Entry '{entry_id}' already exists in pending batch for {source_key}. " + f"Use duplicate_entry_behavior='overwrite' to allow updates." + ) + existing_record = self.get_record(source_path, entry_id, flush=False) + if existing_record is not None: + raise ValueError( + f"Entry '{entry_id}' already exists in {'/'.join(source_path)}. " + f"Use duplicate_entry_behavior='overwrite' to allow updates." + ) + + # Add entry_id column to the data + data_with_entry_id = self._ensure_entry_id_column(arrow_data, entry_id) + + if force_flush: + # Write immediately + table_path = self._get_table_path(source_path) + table_path.mkdir(parents=True, exist_ok=True) + + delta_table = self._get_existing_delta_table(source_path) + + if delta_table is None: + # Create new table - save original schema first + write_deltalake(str(table_path), data_with_entry_id, mode="overwrite") + logger.debug(f"Created new Delta table for {source_key}") + else: + if self.duplicate_entry_behavior == "overwrite": + try: + delta_table.delete( + f"__entry_id = '{entry_id.replace(chr(39), chr(39) + chr(39))}'" + ) + logger.debug( + f"Deleted existing record {entry_id} from {source_key}" + ) + except Exception as e: + logger.debug( + f"No existing record to delete for {entry_id}: {e}" + ) + + write_deltalake( + table_path, + data_with_entry_id, + mode="append", + schema_mode="merge", + ) + + # Update cache + self._delta_table_cache[source_key] = DeltaTable(str(table_path)) + else: + # Add to the batch for later flushing + self._pending_batches[source_key][entry_id] = data_with_entry_id + batch_size = len(self._pending_batches[source_key]) + + # Check if we need to flush + if batch_size >= self.batch_size: + self.flush_batch(source_path) + + logger.debug(f"Added record {entry_id} to {source_key}") + return arrow_data + + def get_pending_batch_info(self) -> dict[str, int]: + """ + Get information about pending batches. + + Returns: + Dictionary mapping source keys to number of pending records + """ + return { + source_key: len(tables) + for source_key, tables in self._pending_batches.items() + if tables + } + + def get_record( + self, source_path: tuple[str, ...], entry_id: str, flush: bool = False + ) -> pa.Table | None: + """ + Get a specific record by entry_id with schema preservation. 
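
For illustration, reads consult the in-memory pending batch before touching the Delta table, so a record added without force_flush is still visible (sketch, continuing the example above):

    store.add_record(("experiments", "run_001"), "e3", tbl)
    store.get_pending_batch_info()                        # e.g. {'experiments/run_001': 1}
    store.get_record(("experiments", "run_001"), "e3")    # served from the pending batch
    store.get_record(("experiments", "run_001"), "e3", flush=True)   # flushes first, then reads from Delta
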
+
+        Args:
+            source_path: Tuple of path components
+            entry_id: Unique identifier for the record
+
+        Returns:
+            Arrow table for the record or None if not found
+        """
+        if flush:
+            self.flush_batch(source_path)
+        self._validate_source_path(source_path)
+
+        # check if entry_id is found in pending batches
+        source_key = self._get_source_key(source_path)
+        if entry_id in self._pending_batches[source_key]:
+            # Return the pending record directly
+            return self._pending_batches[source_key][entry_id]
+
+        delta_table = self._get_existing_delta_table(source_path)
+        if delta_table is None:
+            return None
+
+        try:
+            # Use schema-preserving read
+            filter_expr = self._create_entry_id_filter(entry_id)
+            result = self._read_table_with_filter(delta_table, filters=filter_expr)
+
+            if len(result) == 0:
+                return None
+
+            # Remove the __entry_id column before returning
+            return self._remove_entry_id_column(result)
+
+        except Exception as e:
+            logger.error(
+                f"Error getting record {entry_id} from {'/'.join(source_path)}: {e}"
+            )
+            raise e
+
+    def get_all_records(
+        self,
+        source_path: tuple[str, ...],
+        add_entry_id_column: bool | str = False,
+        retrieve_pending: bool = True,
+        flush: bool = False,
+    ) -> pa.Table | None:
+        """
+        Retrieve all records for a given source path as a single table with schema preservation.
+
+        Args:
+            source_path: Tuple of path components
+            add_entry_id_column: Control entry ID column inclusion:
+                - False: Don't include entry ID column (default)
+                - True: Include entry ID column as "__entry_id"
+                - str: Include entry ID column with custom name
+
+        Returns:
+            Arrow table containing all records with original schema, or None if no records found
+        """
+        if flush:
+            self.flush_batch(source_path)
+        self._validate_source_path(source_path)
+
+        collected_tables = []
+        if retrieve_pending:
+            # Check if there are pending records in the batch
+            for entry_id, arrow_table in self._pending_batches[
+                self._get_source_key(source_path)
+            ].items():
+                collected_tables.append(
+                    self._ensure_entry_id_column(arrow_table, entry_id)
+                )
+
+        delta_table = self._get_existing_delta_table(source_path)
+        if delta_table is not None:
+            try:
+                # Use filter-based read
+                result = self._read_table_with_filter(delta_table)
+
+                if len(result) != 0:
+                    collected_tables.append(result)
+
+            except Exception as e:
+                logger.error(
+                    f"Error getting all records from {'/'.join(source_path)}: {e}"
+                )
+        if collected_tables:
+            total_table = pa.concat_tables(collected_tables)
+
+            # Handle entry_id column based on parameter
+            return self._handle_entry_id_column(total_table, add_entry_id_column)
+
+        return None
+
+    def get_all_records_as_polars(
+        self, source_path: tuple[str, ...], flush: bool = True
+    ) -> pl.LazyFrame | None:
+        """
+        Retrieve all records for a given source path as a single Polars LazyFrame.
+
+        Args:
+            source_path: Tuple of path components
+
+        Returns:
+            Polars LazyFrame containing all records, or None if no records found
+        """
+        all_records = self.get_all_records(source_path, flush=flush)
+        if all_records is None:
+            return None
+        return pl.LazyFrame(all_records)
+
+    def get_records_by_ids(
+        self,
+        source_path: tuple[str, ...],
+        entry_ids: list[str] | pl.Series | pa.Array,
+        add_entry_id_column: bool | str = False,
+        preserve_input_order: bool = False,
+        flush: bool = False,
+    ) -> pa.Table | None:
+        """
+        Retrieve records by entry IDs as a single table with schema preservation.
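
The add_entry_id_column switch used above applies to bulk reads as well; an illustrative sketch, assuming records were written under ("experiments", "run_001"):

    plain   = store.get_all_records(("experiments", "run_001"))                                   # no entry-id column
    withid  = store.get_all_records(("experiments", "run_001"), add_entry_id_column=True)         # keeps "__entry_id"
    renamed = store.get_all_records(("experiments", "run_001"), add_entry_id_column="record_id")  # custom column name
    lazy    = store.get_all_records_as_polars(("experiments", "run_001"))                         # same data as a Polars LazyFrame
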
+ + Args: + source_path: Tuple of path components + entry_ids: Entry IDs to retrieve + add_entry_id_column: Control entry ID column inclusion + preserve_input_order: If True, return results in input order with nulls for missing + + Returns: + Arrow table containing all found records with original schema, or None if no records found + """ + if flush: + self.flush_batch(source_path) + + self._validate_source_path(source_path) + + # Convert input to list of strings for consistency + if isinstance(entry_ids, list): + if not entry_ids: + return None + entry_ids_list = entry_ids + elif isinstance(entry_ids, pl.Series): + if len(entry_ids) == 0: + return None + entry_ids_list = entry_ids.to_list() + elif isinstance(entry_ids, pa.Array): + if len(entry_ids) == 0: + return None + entry_ids_list = entry_ids.to_pylist() + else: + raise TypeError( + f"entry_ids must be list[str], pl.Series, or pa.Array, got {type(entry_ids)}" + ) + + delta_table = self._get_existing_delta_table(source_path) + if delta_table is None: + return None + + try: + # Use schema-preserving read with filters + filter_expr = self._create_entry_ids_filter(entry_ids_list) + result = self._read_table_with_filter(delta_table, filters=filter_expr) + + if len(result) == 0: + return None + + if preserve_input_order: + # Need to reorder results and add nulls for missing entries + import pandas as pd + + df = result.to_pandas() + df = df.set_index("__entry_id") + + # Create a DataFrame with the desired order, filling missing with NaN + ordered_df = df.reindex(entry_ids_list) + + # Convert back to Arrow + result = pa.Table.from_pandas(ordered_df.reset_index()) + + # Handle entry_id column based on parameter + return self._handle_entry_id_column(result, add_entry_id_column) + + except Exception as e: + logger.error( + f"Error getting records by IDs from {'/'.join(source_path)}: {e}" + ) + return None + + def get_records_by_ids_as_polars( + self, + source_path: tuple[str, ...], + entry_ids: list[str] | pl.Series | pa.Array, + add_entry_id_column: bool | str = False, + preserve_input_order: bool = False, + flush: bool = False, + ) -> pl.LazyFrame | None: + """ + Retrieve records by entry IDs as a single Polars LazyFrame. + + Args: + source_path: Tuple of path components + entry_ids: Entry IDs to retrieve + add_entry_id_column: Control entry ID column inclusion + preserve_input_order: If True, return results in input order with nulls for missing + + Returns: + Polars LazyFrame containing all found records, or None if no records found + """ + arrow_result = self.get_records_by_ids( + source_path, + entry_ids, + add_entry_id_column, + preserve_input_order, + flush=flush, + ) + + if arrow_result is None: + return None + + # Convert to Polars LazyFrame + return pl.LazyFrame(arrow_result) + + # Additional utility methods + def list_sources(self) -> list[tuple[str, ...]]: + """ + List all available source paths. 
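
A sketch of the ID-based lookup defined above; IDs may be given as a list, a Polars Series, or an Arrow array:

    found = store.get_records_by_ids(
        ("experiments", "run_001"),
        ["e1", "does_not_exist", "e2"],
        add_entry_id_column="record_id",
        preserve_input_order=True,   # missing IDs come back as null rows, in input order
    )
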
+
+        Returns:
+            List of source path tuples
+        """
+        sources = []
+
+        def _scan_directory(current_path: Path, path_components: tuple[str, ...]):
+            """Recursively scan for Delta tables."""
+            for item in current_path.iterdir():
+                if not item.is_dir():
+                    continue
+
+                new_path_components = path_components + (item.name,)
+
+                # Check if this directory contains a Delta table
+                try:
+                    DeltaTable(str(item))
+                    sources.append(new_path_components)
+                except TableNotFoundError:
+                    # Not a Delta table, continue scanning subdirectories
+                    if len(new_path_components) < self.max_hierarchy_depth:
+                        _scan_directory(item, new_path_components)
+
+        _scan_directory(self.base_path, ())
+        return sources
+
+    def delete_source(self, source_path: tuple[str, ...]) -> bool:
+        """
+        Delete an entire source (all records for a source path).
+
+        Args:
+            source_path: Tuple of path components
+
+        Returns:
+            True if source was deleted, False if it didn't exist
+        """
+        self._validate_source_path(source_path)
+
+        # Flush any pending batches first
+        self.flush_batch(source_path)
+
+        table_path = self._get_table_path(source_path)
+        source_key = self._get_source_key(source_path)
+
+        if not table_path.exists():
+            return False
+
+        try:
+            # Remove from caches
+            if source_key in self._delta_table_cache:
+                del self._delta_table_cache[source_key]
+            if source_key in self._schema_cache:
+                del self._schema_cache[source_key]
+
+            # Remove directory
+            import shutil
+
+            shutil.rmtree(table_path)
+
+            logger.info(f"Deleted source {source_key}")
+            return True
+
+        except Exception as e:
+            logger.error(f"Error deleting source {source_key}: {e}")
+            return False
+
+    def delete_record(self, source_path: tuple[str, ...], entry_id: str) -> bool:
+        """
+        Delete a specific record.
+
+        Args:
+            source_path: Tuple of path components
+            entry_id: ID of the record to delete
+
+        Returns:
+            True if record was deleted, False if it didn't exist
+        """
+        self._validate_source_path(source_path)
+
+        # Flush any pending batches first
+        self.flush_batch(source_path)
+
+        delta_table = self._get_existing_delta_table(source_path)
+        if delta_table is None:
+            return False
+
+        try:
+            # Check if record exists using proper filter
+            filter_expr = self._create_entry_id_filter(entry_id)
+            existing = self._read_table_with_filter(delta_table, filters=filter_expr)
+            if len(existing) == 0:
+                return False
+
+            # Delete the record using SQL-style predicate (this is correct for delete operations)
+            delta_table.delete(
+                f"__entry_id = '{entry_id.replace(chr(39), chr(39) + chr(39))}'"
+            )
+
+            # Update cache
+            source_key = self._get_source_key(source_path)
+            self._delta_table_cache[source_key] = delta_table
+
+            logger.debug(f"Deleted record {entry_id} from {'/'.join(source_path)}")
+            return True
+
+        except Exception as e:
+            logger.error(
+                f"Error deleting record {entry_id} from {'/'.join(source_path)}: {e}"
+            )
+            return False
+
+    def get_table_info(self, source_path: tuple[str, ...]) -> dict[str, Any] | None:
+        """
+        Get metadata information about a Delta table.
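
The maintenance helpers above (list_sources, delete_record, delete_source), together with get_table_info below, might be exercised like this (sketch; the printed values depend on what has been written):

    for src in store.list_sources():              # e.g. [("experiments", "run_001"), ...]
        info = store.get_table_info(src)
        print(src, info["version"], info["pending_records"])

    store.delete_record(("experiments", "run_001"), "e2")   # True if the record existed
    store.delete_source(("experiments", "run_001"))         # removes the whole Delta table directory
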
+ + Args: + source_path: Tuple of path components + + Returns: + Dictionary with table metadata, or None if table doesn't exist + """ + self._validate_source_path(source_path) + + delta_table = self._get_existing_delta_table(source_path) + if delta_table is None: + return None + + try: + # Get basic info + schema = delta_table.schema() + history = delta_table.history() + source_key = self._get_source_key(source_path) + + # Add pending batch info + pending_info = self.get_pending_batch_info() + pending_count = pending_info.get(source_key, 0) + + return { + "path": str(self._get_table_path(source_path)), + "source_path": source_path, + "schema": schema, + "version": delta_table.version(), + "num_files": len(delta_table.files()), + "history_length": len(history), + "latest_commit": history[0] if history else None, + "pending_records": pending_count, + } + + except Exception as e: + logger.error(f"Error getting table info for {'/'.join(source_path)}: {e}") + return None diff --git a/src/orcapod/store/core.py b/src/orcapod/stores/legacy/dict_data_stores.py similarity index 95% rename from src/orcapod/store/core.py rename to src/orcapod/stores/legacy/dict_data_stores.py index c41dd55..718fef0 100644 --- a/src/orcapod/store/core.py +++ b/src/orcapod/stores/legacy/dict_data_stores.py @@ -4,11 +4,11 @@ from os import PathLike from pathlib import Path -from orcapod.hashing import hash_packet +from orcapod.hashing.legacy_core import hash_packet +from orcapod.hashing.types import LegacyPacketHasher from orcapod.hashing.defaults import get_default_composite_file_hasher -from orcapod.hashing.types import PacketHasher -from orcapod.store.types import DataStore -from orcapod.types import Packet +from orcapod.stores.legacy.types import DataStore +from orcapod.types import Packet, PacketLike logger = logging.getLogger(__name__) @@ -30,15 +30,15 @@ def memoize( self, function_name: str, function_hash: str, - packet: Packet, - output_packet: Packet, + packet: PacketLike, + output_packet: PacketLike, overwrite: bool = False, - ) -> Packet: + ) -> PacketLike: return output_packet def retrieve_memoized( - self, function_name: str, function_hash: str, packet: Packet - ) -> Packet | None: + self, function_name: str, function_hash: str, packet: PacketLike + ) -> PacketLike | None: return None @@ -46,7 +46,7 @@ class DirDataStore(DataStore): def __init__( self, store_dir: str | PathLike = "./pod_data", - packet_hasher: PacketHasher | None = None, + packet_hasher: LegacyPacketHasher | None = None, copy_files=True, preserve_filename=True, overwrite=False, @@ -71,9 +71,9 @@ def memoize( self, function_name: str, function_hash: str, - packet: Packet, - output_packet: Packet, - ) -> Packet: + packet: PacketLike, + output_packet: PacketLike, + ) -> PacketLike: if self.legacy_mode: packet_hash = hash_packet(packet, algorithm=self.legacy_algorithm) else: @@ -139,7 +139,7 @@ def memoize( return retrieved_output_packet def retrieve_memoized( - self, function_name: str, function_hash: str, packet: Packet + self, function_name: str, function_hash: str, packet: PacketLike ) -> Packet | None: if self.legacy_mode: packet_hash = hash_packet(packet, algorithm=self.legacy_algorithm) diff --git a/src/orcapod/store/transfer.py b/src/orcapod/stores/legacy/dict_transfer_data_store.py similarity index 83% rename from src/orcapod/store/transfer.py rename to src/orcapod/stores/legacy/dict_transfer_data_store.py index c9a4e5d..fe7a52a 100644 --- a/src/orcapod/store/transfer.py +++ b/src/orcapod/stores/legacy/dict_transfer_data_store.py @@ 
-1,12 +1,12 @@ # Implements transfer data store that lets you transfer memoized packets between data stores. -from orcapod.store.types import DataStore -from orcapod.types import Packet +from orcapod.stores.legacy.types import DataStore +from orcapod.types import PacketLike class TransferDataStore(DataStore): """ - A data store that allows transferring memoized packets between different data stores. + A data store that allows transferring recorded data between different data stores. This is useful for moving data between different storage backends. """ @@ -14,7 +14,9 @@ def __init__(self, source_store: DataStore, target_store: DataStore) -> None: self.source_store = source_store self.target_store = target_store - def transfer(self, function_name: str, content_hash: str, packet: Packet) -> Packet: + def transfer( + self, function_name: str, content_hash: str, packet: PacketLike + ) -> PacketLike: """ Transfer a memoized packet from the source store to the target store. """ @@ -29,8 +31,8 @@ def transfer(self, function_name: str, content_hash: str, packet: Packet) -> Pac ) def retrieve_memoized( - self, function_name: str, function_hash: str, packet: Packet - ) -> Packet | None: + self, function_name: str, function_hash: str, packet: PacketLike + ) -> PacketLike | None: """ Retrieve a memoized packet from the target store. """ @@ -57,9 +59,9 @@ def memoize( self, function_name: str, function_hash: str, - packet: Packet, - output_packet: Packet, - ) -> Packet: + packet: PacketLike, + output_packet: PacketLike, + ) -> PacketLike: """ Memoize a packet in the target store. """ diff --git a/src/orcapod/store/arrow_data_stores.py b/src/orcapod/stores/legacy/legacy_arrow_data_stores.py similarity index 72% rename from src/orcapod/store/arrow_data_stores.py rename to src/orcapod/stores/legacy/legacy_arrow_data_stores.py index 4be9698..0a9a7e9 100644 --- a/src/orcapod/store/arrow_data_stores.py +++ b/src/orcapod/stores/legacy/legacy_arrow_data_stores.py @@ -7,7 +7,8 @@ from dataclasses import dataclass from datetime import datetime, timedelta import logging -from orcapod.store.types import DuplicateError +from orcapod.stores.types import DuplicateError +from pathlib import Path # Module-level logger logger = logging.getLogger(__name__) @@ -16,40 +17,47 @@ class MockArrowDataStore: """ Mock Arrow data store for testing purposes. - This class simulates the behavior of ParquetArrowDataStore without actually saving anything. - It is useful for unit tests where you want to avoid filesystem dependencies. + This class simulates the behavior of ArrowDataStore without actually saving anything. + It is useful for unit tests where you want to avoid any I/O operations or when you need + to test the behavior of your code without relying on external systems. If you need some + persistence of saved data, consider using SimpleParquetDataStore without providing a + file path instead. 
""" def __init__(self): logger.info("Initialized MockArrowDataStore") - def add_record(self, - source_name: str, + def add_record( + self, + source_pathh: tuple[str, ...], source_id: str, entry_id: str, - arrow_data: pa.Table) -> pa.Table: + arrow_data: pa.Table, + ) -> pa.Table: """Add a record to the mock store.""" return arrow_data - def get_record(self, source_name: str, - source_id: str, - entry_id: str) -> pa.Table | None: + def get_record( + self, source_path: tuple[str, ...], source_id: str, entry_id: str + ) -> pa.Table | None: """Get a specific record.""" return None - def get_all_records(self, source_name: str, source_id: str) -> pa.Table | None: + def get_all_records( + self, source_path: tuple[str, ...], source_id: str + ) -> pa.Table | None: """Retrieve all records for a given source as a single table.""" return None def get_all_records_as_polars( - self, source_name: str, source_id: str + self, source_path: tuple[str, ...], source_id: str ) -> pl.LazyFrame | None: """Retrieve all records for a given source as a single Polars LazyFrame.""" return None def get_records_by_ids( self, - source_name: str, + source_path: tuple[str, ...], source_id: str, entry_ids: list[str] | pl.Series | pa.Array, add_entry_id_column: bool | str = False, @@ -76,10 +84,10 @@ def get_records_by_ids( Arrow table containing all found records, or None if no records found """ return None - + def get_records_by_ids_as_polars( self, - source_name: str, + source_path: tuple[str, ...], source_id: str, entry_ids: list[str] | pl.Series | pa.Array, add_entry_id_column: bool | str = False, @@ -88,21 +96,28 @@ def get_records_by_ids_as_polars( return None - - -class InMemoryArrowDataStore: +class SimpleParquetDataStore: """ - In-memory Arrow data store for testing purposes. - This class simulates the behavior of ParquetArrowDataStore without actual file I/O. - It is useful for unit tests where you want to avoid filesystem dependencies. - - Uses dict of dict of Arrow tables for efficient storage and retrieval. + Simple Parquet-based Arrow data store, primarily to be used for development purposes. + If no file path is provided, it will not save anything to disk. Instead, all data will be stored in memory. + If a file path is provided, it will save data to a single Parquet files in a directory structure reflecting + the provided source_path. To speed up the process, data will be stored in memory and only saved to disk + when the `flush` method is called. If used as part of pipeline, flush is automatically called + at the end of pipeline execution. + Note that this store provides only very basic functionality and is not suitable for production use. + For each distinct source_path, only a single parquet file is created to store all data entries. + Appending is not efficient as it requires reading the entire file into the memory, appending new data, + and then writing the entire file back to disk. This is not suitable for large datasets or frequent updates. + However, for development/testing purposes, this data store provides a simple way to store and retrieve + data without the overhead of a full database or file system and provides very high performance. """ - def __init__(self, duplicate_entry_behavior: str = "error"): + def __init__( + self, path: str | Path | None = None, duplicate_entry_behavior: str = "error" + ): """ Initialize the InMemoryArrowDataStore. 
- + Args: duplicate_entry_behavior: How to handle duplicate entry_ids: - 'error': Raise ValueError when entry_id already exists @@ -112,107 +127,127 @@ def __init__(self, duplicate_entry_behavior: str = "error"): if duplicate_entry_behavior not in ["error", "overwrite"]: raise ValueError("duplicate_entry_behavior must be 'error' or 'overwrite'") self.duplicate_entry_behavior = duplicate_entry_behavior - + # Store Arrow tables: {source_key: {entry_id: arrow_table}} self._in_memory_store: dict[str, dict[str, pa.Table]] = {} - logger.info(f"Initialized InMemoryArrowDataStore with duplicate_entry_behavior='{duplicate_entry_behavior}'") + logger.info( + f"Initialized InMemoryArrowDataStore with duplicate_entry_behavior='{duplicate_entry_behavior}'" + ) + self.base_path = Path(path) if path else None + if self.base_path: + try: + self.base_path.mkdir(parents=True, exist_ok=True) + except Exception as e: + logger.error(f"Error creating base path {self.base_path}: {e}") - def _get_source_key(self, source_name: str, source_id: str) -> str: + def _get_source_key(self, source_path: tuple[str, ...]) -> str: """Generate key for source storage.""" - return f"{source_name}:{source_id}" + return "/".join(source_path) def add_record( self, - source_name: str, - source_id: str, + source_path: tuple[str, ...], entry_id: str, arrow_data: pa.Table, + ignore_duplicate: bool = False, ) -> pa.Table: """ Add a record to the in-memory store. - + Args: source_name: Name of the data source source_id: ID of the specific dataset within the source entry_id: Unique identifier for this record arrow_data: The Arrow table data to store - + Returns: - The original arrow_data table - + arrow_data equivalent to having loaded the corresponding entry that was just saved + Raises: ValueError: If entry_id already exists and duplicate_entry_behavior is 'error' """ - source_key = self._get_source_key(source_name, source_id) - + source_key = self._get_source_key(source_path) + # Initialize source if it doesn't exist if source_key not in self._in_memory_store: self._in_memory_store[source_key] = {} - + local_data = self._in_memory_store[source_key] - + # Check for duplicate entry - if entry_id in local_data and self.duplicate_entry_behavior == "error": - raise ValueError( - f"Entry '{entry_id}' already exists in {source_name}/{source_id}. " - f"Use duplicate_entry_behavior='overwrite' to allow updates." - ) - + if entry_id in local_data: + if not ignore_duplicate and self.duplicate_entry_behavior == "error": + raise ValueError( + f"Entry '{entry_id}' already exists in {source_key}. " + f"Use duplicate_entry_behavior='overwrite' to allow updates." 
+ ) + # Store the record local_data[entry_id] = arrow_data - + action = "Updated" if entry_id in local_data else "Added" logger.debug(f"{action} record {entry_id} in {source_key}") return arrow_data + def load_existing_record(self, source_path: tuple[str, ...]): + source_key = self._get_source_key(source_path) + if self.base_path is not None and source_key not in self._in_memory_store: + self.load_from_parquet(self.base_path, source_path) + def get_record( - self, source_name: str, source_id: str, entry_id: str + self, source_path: tuple[str, ...], entry_id: str ) -> pa.Table | None: """Get a specific record.""" - source_key = self._get_source_key(source_name, source_id) + self.load_existing_record(source_path) + source_key = self._get_source_key(source_path) local_data = self._in_memory_store.get(source_key, {}) return local_data.get(entry_id) - def get_all_records(self, source_name: str, source_id: str) -> pa.Table | None: + def get_all_records( + self, source_path: tuple[str, ...], add_entry_id_column: bool | str = False + ) -> pa.Table | None: """Retrieve all records for a given source as a single table.""" - source_key = self._get_source_key(source_name, source_id) + self.load_existing_record(source_path) + source_key = self._get_source_key(source_path) local_data = self._in_memory_store.get(source_key, {}) - + if not local_data: return None tables_with_keys = [] for key, table in local_data.items(): # Add entry_id column to each table - key_array = pa.array([key] * len(table), type=pa.string()) + key_array = pa.array([key] * len(table), type=pa.large_string()) table_with_key = table.add_column(0, "__entry_id", key_array) tables_with_keys.append(table_with_key) # Concatenate all tables if tables_with_keys: - return pa.concat_tables(tables_with_keys) + combined_table = pa.concat_tables(tables_with_keys) + if not add_entry_id_column: + combined_table = combined_table.drop(columns=["__entry_id"]) + return combined_table return None def get_all_records_as_polars( - self, source_name: str, source_id: str + self, source_path: tuple[str, ...] ) -> pl.LazyFrame | None: """Retrieve all records for a given source as a single Polars LazyFrame.""" - all_records = self.get_all_records(source_name, source_id) + all_records = self.get_all_records(source_path) if all_records is None: return None return pl.LazyFrame(all_records) def get_records_by_ids( self, - source_name: str, - source_id: str, + source_path: tuple[str, ...], entry_ids: list[str] | pl.Series | pa.Array, add_entry_id_column: bool | str = False, preserve_input_order: bool = False, ) -> pa.Table | None: """ Retrieve records by entry IDs as a single table. - + Args: source_name: Name of the data source source_id: ID of the specific dataset within the source @@ -226,7 +261,7 @@ def get_records_by_ids( - str: Include entry ID column with custom name preserve_input_order: If True, return results in the same order as input entry_ids, with null rows for missing entries. If False, return in storage order. 
- + Returns: Arrow table containing all found records, or None if no records found """ @@ -248,20 +283,22 @@ def get_records_by_ids( f"entry_ids must be list[str], pl.Series, or pa.Array, got {type(entry_ids)}" ) - source_key = self._get_source_key(source_name, source_id) + self.load_existing_record(source_path) + + source_key = self._get_source_key(source_path) local_data = self._in_memory_store.get(source_key, {}) - + if not local_data: return None # Collect matching tables found_tables = [] found_entry_ids = [] - + if preserve_input_order: # Preserve input order, include nulls for missing entries first_table_schema = None - + for entry_id in entry_ids_list: if entry_id in local_data: table = local_data[entry_id] @@ -270,7 +307,7 @@ def get_records_by_ids( table_with_key = table.add_column(0, "__entry_id", key_array) found_tables.append(table_with_key) found_entry_ids.append(entry_id) - + # Store schema for creating null rows if first_table_schema is None: first_table_schema = table_with_key.schema @@ -281,12 +318,14 @@ def get_records_by_ids( null_data = {} for field in first_table_schema: if field.name == "__entry_id": - null_data[field.name] = pa.array([entry_id], type=field.type) + null_data[field.name] = pa.array( + [entry_id], type=field.type + ) else: # Create null array with proper type null_array = pa.array([None], type=field.type) null_data[field.name] = null_array - + null_table = pa.table(null_data, schema=first_table_schema) found_tables.append(null_table) found_entry_ids.append(entry_id) @@ -315,12 +354,17 @@ def get_records_by_ids( # Remove the __entry_id column column_names = combined_table.column_names if "__entry_id" in column_names: - indices_to_keep = [i for i, name in enumerate(column_names) if name != "__entry_id"] + indices_to_keep = [ + i for i, name in enumerate(column_names) if name != "__entry_id" + ] combined_table = combined_table.select(indices_to_keep) elif isinstance(add_entry_id_column, str): # Rename __entry_id to custom name schema = combined_table.schema - new_names = [add_entry_id_column if name == "__entry_id" else name for name in schema.names] + new_names = [ + add_entry_id_column if name == "__entry_id" else name + for name in schema.names + ] combined_table = combined_table.rename_columns(new_names) # If add_entry_id_column is True, keep __entry_id as is @@ -328,15 +372,14 @@ def get_records_by_ids( def get_records_by_ids_as_polars( self, - source_name: str, - source_id: str, + source_path: tuple[str, ...], entry_ids: list[str] | pl.Series | pa.Array, add_entry_id_column: bool | str = False, preserve_input_order: bool = False, ) -> pl.LazyFrame | None: """ Retrieve records by entry IDs as a single Polars LazyFrame. - + Args: source_name: Name of the data source source_id: ID of the specific dataset within the source @@ -350,50 +393,43 @@ def get_records_by_ids_as_polars( - str: Include entry ID column with custom name preserve_input_order: If True, return results in the same order as input entry_ids, with null rows for missing entries. If False, return in storage order. 
- + Returns: Polars LazyFrame containing all found records, or None if no records found """ # Get Arrow result and convert to Polars arrow_result = self.get_records_by_ids( - source_name, source_id, entry_ids, add_entry_id_column, preserve_input_order + source_path, entry_ids, add_entry_id_column, preserve_input_order ) - + if arrow_result is None: return None - + # Convert to Polars LazyFrame return pl.LazyFrame(arrow_result) def save_to_parquet(self, base_path: str | Path) -> None: """ Save all data to Parquet files in a directory structure. - + Directory structure: base_path/source_name/source_id/data.parquet - + Args: base_path: Base directory path where to save the Parquet files """ base_path = Path(base_path) base_path.mkdir(parents=True, exist_ok=True) - + saved_count = 0 - - for source_key, local_data in self._in_memory_store.items(): + + for source_id, local_data in self._in_memory_store.items(): if not local_data: continue - - # Parse source_name and source_id from the key - if ":" not in source_key: - logger.warning(f"Invalid source key format: {source_key}, skipping") - continue - - source_name, source_id = source_key.split(":", 1) - + # Create directory structure - source_dir = base_path / source_name / source_id + source_dir = base_path / source_id source_dir.mkdir(parents=True, exist_ok=True) - + # Combine all tables for this source with entry_id column tables_with_keys = [] for entry_id, table in local_data.items(): @@ -401,126 +437,130 @@ def save_to_parquet(self, base_path: str | Path) -> None: key_array = pa.array([entry_id] * len(table), type=pa.string()) table_with_key = table.add_column(0, "__entry_id", key_array) tables_with_keys.append(table_with_key) - + # Concatenate all tables if tables_with_keys: combined_table = pa.concat_tables(tables_with_keys) - + # Save as Parquet file + # TODO: perform safe "atomic" write parquet_path = source_dir / "data.parquet" import pyarrow.parquet as pq + pq.write_table(combined_table, parquet_path) - + saved_count += 1 - logger.debug(f"Saved {len(combined_table)} records for {source_key} to {parquet_path}") - + logger.debug( + f"Saved {len(combined_table)} records for {source_id} to {parquet_path}" + ) + logger.info(f"Saved {saved_count} sources to Parquet files in {base_path}") - def load_from_parquet(self, base_path: str | Path) -> None: + def load_from_parquet( + self, base_path: str | Path, source_path: tuple[str, ...] + ) -> None: """ Load data from Parquet files with the expected directory structure. 
- + Expected structure: base_path/source_name/source_id/data.parquet - + Args: base_path: Base directory path containing the Parquet files """ - base_path = Path(base_path) - - if not base_path.exists(): - logger.warning(f"Base path {base_path} does not exist") + + source_key = self._get_source_key(source_path) + target_path = Path(base_path) / source_key + + if not target_path.exists(): + logger.info(f"Base path {base_path} does not exist") return - - # Clear existing data - self._in_memory_store.clear() - + loaded_count = 0 - - # Traverse directory structure: source_name/source_id/ - for source_name_dir in base_path.iterdir(): - if not source_name_dir.is_dir(): - continue - - source_name = source_name_dir.name - - for source_id_dir in source_name_dir.iterdir(): - if not source_id_dir.is_dir(): - continue - - source_id = source_id_dir.name - source_key = self._get_source_key(source_name, source_id) - - # Look for Parquet files in this directory - parquet_files = list(source_id_dir.glob("*.parquet")) - - if not parquet_files: - logger.debug(f"No Parquet files found in {source_id_dir}") + + # Look for Parquet files in this directory + parquet_files = list(target_path.glob("*.parquet")) + if not parquet_files: + logger.debug(f"No Parquet files found in {target_path}") + return + + # Load all Parquet files and combine them + all_records = [] + + for parquet_file in parquet_files: + try: + import pyarrow.parquet as pq + + table = pq.read_table(parquet_file) + + # Validate that __entry_id column exists + if "__entry_id" not in table.column_names: + logger.warning( + f"Parquet file {parquet_file} missing __entry_id column, skipping" + ) continue - - # Load all Parquet files and combine them - all_records = [] - - for parquet_file in parquet_files: - try: - import pyarrow.parquet as pq - table = pq.read_table(parquet_file) - - # Validate that __entry_id column exists - if "__entry_id" not in table.column_names: - logger.warning(f"Parquet file {parquet_file} missing __entry_id column, skipping") - continue - - all_records.append(table) - logger.debug(f"Loaded {len(table)} records from {parquet_file}") - - except Exception as e: - logger.error(f"Failed to load Parquet file {parquet_file}: {e}") - continue - - # Process all records for this source - if all_records: - # Combine all tables - if len(all_records) == 1: - combined_table = all_records[0] - else: - combined_table = pa.concat_tables(all_records) - - # Split back into individual records by entry_id - local_data = {} - entry_ids = combined_table.column("__entry_id").to_pylist() - - # Group records by entry_id - entry_id_groups = {} - for i, entry_id in enumerate(entry_ids): - if entry_id not in entry_id_groups: - entry_id_groups[entry_id] = [] - entry_id_groups[entry_id].append(i) - - # Extract each entry_id's records - for entry_id, indices in entry_id_groups.items(): - # Take rows for this entry_id and remove __entry_id column - entry_table = combined_table.take(indices) - - # Remove __entry_id column - column_names = entry_table.column_names - if "__entry_id" in column_names: - indices_to_keep = [i for i, name in enumerate(column_names) if name != "__entry_id"] - entry_table = entry_table.select(indices_to_keep) - - local_data[entry_id] = entry_table - - self._in_memory_store[source_key] = local_data - loaded_count += 1 - - record_count = len(combined_table) - unique_entries = len(entry_id_groups) - logger.debug(f"Loaded {record_count} records ({unique_entries} unique entries) for {source_key}") - - logger.info(f"Loaded {loaded_count} 
sources from Parquet files in {base_path}") - - # Log summary of loaded data - total_records = sum(len(local_data) for local_data in self._in_memory_store.values()) - logger.info(f"Total records loaded: {total_records}") + + all_records.append(table) + logger.debug(f"Loaded {len(table)} records from {parquet_file}") + + except Exception as e: + logger.error(f"Failed to load Parquet file {parquet_file}: {e}") + continue + + # Process all records for this source + if all_records: + # Combine all tables + if len(all_records) == 1: + combined_table = all_records[0] + else: + combined_table = pa.concat_tables(all_records) + + # Split back into individual records by entry_id + local_data = {} + entry_ids = combined_table.column("__entry_id").to_pylist() + + # Group records by entry_id + entry_id_groups = {} + for i, entry_id in enumerate(entry_ids): + if entry_id not in entry_id_groups: + entry_id_groups[entry_id] = [] + entry_id_groups[entry_id].append(i) + + # Extract each entry_id's records + for entry_id, indices in entry_id_groups.items(): + # Take rows for this entry_id and remove __entry_id column + entry_table = combined_table.take(indices) + + # Remove __entry_id column + column_names = entry_table.column_names + if "__entry_id" in column_names: + indices_to_keep = [ + i for i, name in enumerate(column_names) if name != "__entry_id" + ] + entry_table = entry_table.select(indices_to_keep) + + local_data[entry_id] = entry_table + + self._in_memory_store[source_key] = local_data + loaded_count += 1 + + record_count = len(combined_table) + unique_entries = len(entry_id_groups) + logger.info( + f"Loaded {record_count} records ({unique_entries} unique entries) for {source_key}" + ) + + def flush(self): + """ + Flush all in-memory data to Parquet files in the base path. + This will overwrite existing files. + """ + if self.base_path is None: + logger.warning("Base path is not set, cannot flush data") + return + + logger.info(f"Flushing data to Parquet files in {self.base_path}") + self.save_to_parquet(self.base_path) + @dataclass class RecordMetadata: @@ -1634,7 +1674,7 @@ def create_multi_row_record(entry_id: str, num_rows: int = 3) -> pa.Table: store.add_record( "experiments", "dataset_A", valid_entries[0], overwrite_data ) - print(f"✓ Overwrote existing record") + print("✓ Overwrote existing record") # Verify overwrite updated_record = store.get_record( @@ -1648,7 +1688,7 @@ def create_multi_row_record(entry_id: str, num_rows: int = 3) -> pa.Table: # Sync and show final stats store.force_sync() stats = store.get_stats() - print(f"\n=== Final Statistics ===") + print("\n=== Final Statistics ===") print(f"Total records: {stats['total_records']}") print(f"Loaded caches: {stats['loaded_source_caches']}") print(f"Dirty caches: {stats['dirty_caches']}") @@ -1659,6 +1699,380 @@ def create_multi_row_record(entry_id: str, num_rows: int = 3) -> pa.Table: print("\n✓ Single-row constraint testing completed successfully!") +class InMemoryPolarsDataStore: + """ + In-memory Arrow data store using Polars DataFrames for efficient storage and retrieval. + This class provides the same interface as InMemoryArrowDataStore but uses Polars internally + for better performance with large datasets and complex queries. + + Uses dict of Polars DataFrames for efficient storage and retrieval. + Each DataFrame contains all records for a source with an __entry_id column. + """ + + def __init__(self, duplicate_entry_behavior: str = "error"): + """ + Initialize the InMemoryPolarsDataStore. 
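
A sketch of this Polars-backed variant, which keeps one DataFrame per source and still uses the older (source_name, source_id) addressing:

    import pyarrow as pa

    store = InMemoryPolarsDataStore(duplicate_entry_behavior="overwrite")
    store.add_record("experiments", "dataset_A", "e1", pa.table({"x": [1]}))
    store.add_record("experiments", "dataset_A", "e1", pa.table({"x": [2]}))   # replaces e1
    store.entry_exists("experiments", "dataset_A", "e1")     # True
    store.get_stats()["total_records"]                        # row count across all source DataFrames
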
+ + Args: + duplicate_entry_behavior: How to handle duplicate entry_ids: + - 'error': Raise ValueError when entry_id already exists + - 'overwrite': Replace existing entry with new data + """ + # Validate duplicate behavior + if duplicate_entry_behavior not in ["error", "overwrite"]: + raise ValueError("duplicate_entry_behavior must be 'error' or 'overwrite'") + self.duplicate_entry_behavior = duplicate_entry_behavior + + # Store Polars DataFrames: {source_key: polars_dataframe} + # Each DataFrame has an __entry_id column plus user data columns + self._in_memory_store: dict[str, pl.DataFrame] = {} + logger.info( + f"Initialized InMemoryPolarsDataStore with duplicate_entry_behavior='{duplicate_entry_behavior}'" + ) + + def _get_source_key(self, source_name: str, source_id: str) -> str: + """Generate key for source storage.""" + return f"{source_name}:{source_id}" + + def add_record( + self, + source_name: str, + source_id: str, + entry_id: str, + arrow_data: pa.Table, + ) -> pa.Table: + """ + Add a record to the in-memory store. + + Args: + source_name: Name of the data source + source_id: ID of the specific dataset within the source + entry_id: Unique identifier for this record + arrow_data: The Arrow table data to store + + Returns: + arrow_data equivalent to having loaded the corresponding entry that was just saved + + Raises: + ValueError: If entry_id already exists and duplicate_entry_behavior is 'error' + """ + source_key = self._get_source_key(source_name, source_id) + + # Convert Arrow table to Polars DataFrame and add entry_id column + polars_data = cast(pl.DataFrame, pl.from_arrow(arrow_data)) + + # Add __entry_id column + polars_data = polars_data.with_columns(pl.lit(entry_id).alias("__entry_id")) + + # Check if source exists + if source_key not in self._in_memory_store: + # First record for this source + self._in_memory_store[source_key] = polars_data + logger.debug(f"Created new source {source_key} with entry {entry_id}") + else: + existing_df = self._in_memory_store[source_key] + + # Check for duplicate entry + entry_exists = ( + existing_df.filter(pl.col("__entry_id") == entry_id).shape[0] > 0 + ) + + if entry_exists: + if self.duplicate_entry_behavior == "error": + raise ValueError( + f"Entry '{entry_id}' already exists in {source_name}/{source_id}. " + f"Use duplicate_entry_behavior='overwrite' to allow updates." + ) + else: # validity of value is checked in constructor so it must be "ovewrite" + # Remove existing entry and add new one + existing_df = existing_df.filter(pl.col("__entry_id") != entry_id) + self._in_memory_store[source_key] = pl.concat( + [existing_df, polars_data] + ) + logger.debug(f"Overwrote entry {entry_id} in {source_key}") + else: + # Append new entry + try: + self._in_memory_store[source_key] = pl.concat( + [existing_df, polars_data] + ) + logger.debug(f"Added entry {entry_id} to {source_key}") + except Exception as e: + # Handle schema mismatch + existing_cols = set(existing_df.columns) - {"__entry_id"} + new_cols = set(polars_data.columns) - {"__entry_id"} + + if existing_cols != new_cols: + raise ValueError( + f"Schema mismatch for {source_key}. 
" + f"Existing columns: {sorted(existing_cols)}, " + f"New columns: {sorted(new_cols)}" + ) from e + else: + raise e + + return arrow_data + + def get_record( + self, source_name: str, source_id: str, entry_id: str + ) -> pa.Table | None: + """Get a specific record.""" + source_key = self._get_source_key(source_name, source_id) + + if source_key not in self._in_memory_store: + return None + + df = self._in_memory_store[source_key] + + # Filter for the specific entry_id + filtered_df = df.filter(pl.col("__entry_id") == entry_id) + + if filtered_df.shape[0] == 0: + return None + + # Remove __entry_id column and convert to Arrow + result_df = filtered_df.drop("__entry_id") + return result_df.to_arrow() + + def get_all_records( + self, source_name: str, source_id: str, add_entry_id_column: bool | str = False + ) -> pa.Table | None: + """Retrieve all records for a given source as a single table.""" + df = self.get_all_records_as_polars( + source_name, source_id, add_entry_id_column=add_entry_id_column + ) + if df is None: + return None + return df.collect().to_arrow() + + def get_all_records_as_polars( + self, source_name: str, source_id: str, add_entry_id_column: bool | str = False + ) -> pl.LazyFrame | None: + """Retrieve all records for a given source as a single Polars LazyFrame.""" + source_key = self._get_source_key(source_name, source_id) + + if source_key not in self._in_memory_store: + return None + + df = self._in_memory_store[source_key] + + if df.shape[0] == 0: + return None + + # perform column selection lazily + df = df.lazy() + + # Handle entry_id column based on parameter + if add_entry_id_column is False: + # Remove __entry_id column + result_df = df.drop("__entry_id") + elif add_entry_id_column is True: + # Keep __entry_id column as is + result_df = df + elif isinstance(add_entry_id_column, str): + # Rename __entry_id to custom name + result_df = df.rename({"__entry_id": add_entry_id_column}) + else: + raise ValueError( + f"add_entry_id_column must be a bool or str but {add_entry_id_column} was given" + ) + + return result_df + + def get_records_by_ids( + self, + source_name: str, + source_id: str, + entry_ids: list[str] | pl.Series | pa.Array, + add_entry_id_column: bool | str = False, + preserve_input_order: bool = False, + ) -> pa.Table | None: + """ + Retrieve records by entry IDs as a single table. + + Args: + source_name: Name of the data source + source_id: ID of the specific dataset within the source + entry_ids: Entry IDs to retrieve. Can be: + - list[str]: List of entry ID strings + - pl.Series: Polars Series containing entry IDs + - pa.Array: PyArrow Array containing entry IDs + add_entry_id_column: Control entry ID column inclusion: + - False: Don't include entry ID column (default) + - True: Include entry ID column as "__entry_id" + - str: Include entry ID column with custom name + preserve_input_order: If True, return results in the same order as input entry_ids, + with null rows for missing entries. If False, return in storage order. 
+ + Returns: + Arrow table containing all found records, or None if no records found + """ + # Convert input to Polars Series + if isinstance(entry_ids, list): + if not entry_ids: + return None + entry_ids_series = pl.Series("entry_id", entry_ids) + elif isinstance(entry_ids, pl.Series): + if len(entry_ids) == 0: + return None + entry_ids_series = entry_ids + elif isinstance(entry_ids, pa.Array): + if len(entry_ids) == 0: + return None + entry_ids_series: pl.Series = pl.from_arrow( + pa.table({"entry_id": entry_ids}) + )["entry_id"] # type: ignore + else: + raise TypeError( + f"entry_ids must be list[str], pl.Series, or pa.Array, got {type(entry_ids)}" + ) + + source_key = self._get_source_key(source_name, source_id) + + if source_key not in self._in_memory_store: + return None + + df = self._in_memory_store[source_key] + + if preserve_input_order: + # Create DataFrame with input order and join to preserve order with nulls + ordered_df = pl.DataFrame({"__entry_id": entry_ids_series}) + result_df = ordered_df.join(df, on="__entry_id", how="left") + else: + # Filter for matching entry_ids (storage order) + result_df = df.filter(pl.col("__entry_id").is_in(entry_ids_series)) + + if result_df.shape[0] == 0: + return None + + # Handle entry_id column based on parameter + if add_entry_id_column is False: + # Remove __entry_id column + result_df = result_df.drop("__entry_id") + elif add_entry_id_column is True: + # Keep __entry_id column as is + pass + elif isinstance(add_entry_id_column, str): + # Rename __entry_id to custom name + result_df = result_df.rename({"__entry_id": add_entry_id_column}) + + return result_df.to_arrow() + + def get_records_by_ids_as_polars( + self, + source_name: str, + source_id: str, + entry_ids: list[str] | pl.Series | pa.Array, + add_entry_id_column: bool | str = False, + preserve_input_order: bool = False, + ) -> pl.LazyFrame | None: + """ + Retrieve records by entry IDs as a single Polars LazyFrame. + + Args: + source_name: Name of the data source + source_id: ID of the specific dataset within the source + entry_ids: Entry IDs to retrieve. Can be: + - list[str]: List of entry ID strings + - pl.Series: Polars Series containing entry IDs + - pa.Array: PyArrow Array containing entry IDs + add_entry_id_column: Control entry ID column inclusion: + - False: Don't include entry ID column (default) + - True: Include entry ID column as "__entry_id" + - str: Include entry ID column with custom name + preserve_input_order: If True, return results in the same order as input entry_ids, + with null rows for missing entries. If False, return in storage order. 
+ + Returns: + Polars LazyFrame containing all found records, or None if no records found + """ + # Get Arrow result and convert to Polars LazyFrame + arrow_result = self.get_records_by_ids( + source_name, source_id, entry_ids, add_entry_id_column, preserve_input_order + ) + + if arrow_result is None: + return None + + # Convert to Polars LazyFrame + df = cast(pl.DataFrame, pl.from_arrow(arrow_result)) + return df.lazy() + + def entry_exists(self, source_name: str, source_id: str, entry_id: str) -> bool: + """Check if a specific entry exists.""" + source_key = self._get_source_key(source_name, source_id) + + if source_key not in self._in_memory_store: + return False + + df = self._in_memory_store[source_key] + return df.filter(pl.col("__entry_id") == entry_id).shape[0] > 0 + + def list_entries(self, source_name: str, source_id: str) -> set[str]: + """List all entry IDs for a specific source.""" + source_key = self._get_source_key(source_name, source_id) + + if source_key not in self._in_memory_store: + return set() + + df = self._in_memory_store[source_key] + return set(df["__entry_id"].to_list()) + + def list_sources(self) -> set[tuple[str, str]]: + """List all (source_name, source_id) combinations.""" + sources = set() + for source_key in self._in_memory_store.keys(): + if ":" in source_key: + source_name, source_id = source_key.split(":", 1) + sources.add((source_name, source_id)) + return sources + + def clear_source(self, source_name: str, source_id: str) -> None: + """Clear all records for a specific source.""" + source_key = self._get_source_key(source_name, source_id) + if source_key in self._in_memory_store: + del self._in_memory_store[source_key] + logger.debug(f"Cleared source {source_key}") + + def clear_all(self) -> None: + """Clear all records from the store.""" + self._in_memory_store.clear() + logger.info("Cleared all records from store") + + def get_stats(self) -> dict[str, Any]: + """Get comprehensive statistics about the data store.""" + total_records = 0 + total_memory_mb = 0 + source_stats = [] + + for source_key, df in self._in_memory_store.items(): + record_count = df.shape[0] + total_records += record_count + + # Estimate memory usage (rough approximation) + memory_bytes = df.estimated_size() + memory_mb = memory_bytes / (1024 * 1024) + total_memory_mb += memory_mb + + source_stats.append( + { + "source_key": source_key, + "record_count": record_count, + "column_count": df.shape[1] - 1, # Exclude __entry_id + "memory_mb": round(memory_mb, 2), + "columns": [col for col in df.columns if col != "__entry_id"], + } + ) + + return { + "total_records": total_records, + "total_sources": len(self._in_memory_store), + "total_memory_mb": round(total_memory_mb, 2), + "duplicate_entry_behavior": self.duplicate_entry_behavior, + "source_details": source_stats, + } + + if __name__ == "__main__": logging.basicConfig(level=logging.INFO) demo_single_row_constraint() diff --git a/src/orcapod/store/safe_dir_data_store.py b/src/orcapod/stores/legacy/safe_dir_data_store.py similarity index 99% rename from src/orcapod/store/safe_dir_data_store.py rename to src/orcapod/stores/legacy/safe_dir_data_store.py index 0f0ce6a..72f8ef0 100644 --- a/src/orcapod/store/safe_dir_data_store.py +++ b/src/orcapod/stores/legacy/safe_dir_data_store.py @@ -10,7 +10,7 @@ from pathlib import Path from typing import Optional, Union -from .file_ops import atomic_copy, atomic_write +from ..file_utils import atomic_copy, atomic_write logger = logging.getLogger(__name__) @@ -23,7 +23,7 @@ class 
FileLockError(Exception): @contextmanager def file_lock( - lock_path: Union[str, Path], + lock_path: str | Path, shared: bool = False, timeout: float = 30.0, delay: float = 0.1, @@ -205,7 +205,7 @@ def __init__( def _get_output_dir(self, function_name, content_hash, packet): """Get the output directory for a specific packet""" - from orcapod.hashing.core import hash_dict + from orcapod.hashing.legacy_core import hash_dict packet_hash = hash_dict(packet) return self.store_dir / function_name / content_hash / str(packet_hash) diff --git a/src/orcapod/store/types.py b/src/orcapod/stores/legacy/types.py similarity index 75% rename from src/orcapod/store/types.py rename to src/orcapod/stores/legacy/types.py index 6c1b5af..42b0ed5 100644 --- a/src/orcapod/store/types.py +++ b/src/orcapod/stores/legacy/types.py @@ -1,6 +1,6 @@ from typing import Protocol, runtime_checkable -from orcapod.types import Tag, Packet +from orcapod.types import Tag, PacketLike import pyarrow as pa import polars as pl @@ -21,13 +21,13 @@ def memoize( self, function_name: str, function_hash: str, - packet: Packet, - output_packet: Packet, - ) -> Packet: ... + packet: PacketLike, + output_packet: PacketLike, + ) -> PacketLike: ... def retrieve_memoized( - self, function_name: str, function_hash: str, packet: Packet - ) -> Packet | None: ... + self, function_name: str, function_hash: str, packet: PacketLike + ) -> PacketLike | None: ... @runtime_checkable @@ -41,30 +41,29 @@ def __init__(self, *args, **kwargs) -> None: ... def add_record( self, - source_name: str, - source_id: str, + source_path: tuple[str, ...], entry_id: str, arrow_data: pa.Table, + ignore_duplicate: bool = False, ) -> pa.Table: ... def get_record( - self, source_name: str, source_id: str, entry_id: str + self, source_path: tuple[str, ...], entry_id: str ) -> pa.Table | None: ... - def get_all_records(self, source_name: str, source_id: str) -> pa.Table | None: + def get_all_records(self, source_path: tuple[str, ...]) -> pa.Table | None: """Retrieve all records for a given source as a single table.""" ... def get_all_records_as_polars( - self, source_name: str, source_id: str + self, source_path: tuple[str, ...] ) -> pl.LazyFrame | None: """Retrieve all records for a given source as a single Polars DataFrame.""" ... def get_records_by_ids( self, - source_name: str, - source_id: str, + source_path: tuple[str, ...], entry_ids: list[str] | pl.Series | pa.Array, add_entry_id_column: bool | str = False, preserve_input_order: bool = False, @@ -74,11 +73,14 @@ def get_records_by_ids( def get_records_by_ids_as_polars( self, - source_name: str, - source_id: str, + source_path: tuple[str, ...], entry_ids: list[str] | pl.Series | pa.Array, add_entry_id_column: bool | str = False, preserve_input_order: bool = False, ) -> pl.LazyFrame | None: """Retrieve records by entry IDs as a single Polars DataFrame.""" ... + + def flush(self) -> None: + """Flush all pending writes/saves to the data store.""" + ... 
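
The protocol rewritten above is runtime-checkable, so a concrete store's conformance to the new source-path interface can be sanity-checked structurally. A sketch (the protocol's identifier and import paths are assumed here, since they fall outside the visible hunks):

    from orcapod.stores.legacy.types import ArrowDataStore            # name assumed, not shown in this hunk
    from orcapod.stores.legacy.legacy_arrow_data_stores import SimpleParquetDataStore

    store = SimpleParquetDataStore()
    assert isinstance(store, ArrowDataStore)   # structural check: all protocol methods are present
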
diff --git a/src/orcapod/types/__init__.py b/src/orcapod/types/__init__.py index f372259..ca29627 100644 --- a/src/orcapod/types/__init__.py +++ b/src/orcapod/types/__init__.py @@ -1,52 +1,16 @@ -# src/orcabridge/types.py -import os -from collections.abc import Collection, Mapping -from pathlib import Path -from typing import Any, Protocol -from typing_extensions import TypeAlias -from .core import TypeSpec, TypeHandler - - -SUPPORTED_PYTHON_TYPES = (str, int, float, bool, bytes) - -# Convenience alias for anything pathlike -PathLike = str | os.PathLike - -# an (optional) string or a collection of (optional) string values -# Note that TagValue can be nested, allowing for an arbitrary depth of nested lists -TagValue: TypeAlias = str | None | Collection["TagValue"] - -# the top level tag is a mapping from string keys to values that can be a string or -# an arbitrary depth of nested list of strings or None -Tag: TypeAlias = Mapping[str, TagValue] - -# a pathset is a path or an arbitrary depth of nested list of paths -PathSet: TypeAlias = PathLike | Collection[PathLike | None] - -# Simple data types that we support (with clear Polars correspondence) -SupportedNativePythonData: TypeAlias = str | int | float | bool | bytes - -ExtendedSupportedPythonData: TypeAlias = SupportedNativePythonData | PathLike - -# Extended data values that can be stored in packets -# Either the original PathSet or one of our supported simple data types -DataValue: TypeAlias = PathSet | SupportedNativePythonData | Collection["DataValue"] - - -# a packet is a mapping from string keys to data values -Packet: TypeAlias = Mapping[str, DataValue] - -# a batch is a tuple of a tag and a list of packets -Batch: TypeAlias = tuple[Tag, Collection[Packet]] - - -class PodFunction(Protocol): - """ - A function suitable to be used in a FunctionPod. - It takes one or more named arguments, each corresponding to either: - - A path to a file or directory (PathSet) - for backward compatibility - - A simple data value (str, int, float, bool, bytes, Path) - and returns either None, a single value, or a list of values - """ - - def __call__(self, **kwargs: DataValue) -> None | DataValue | list[DataValue]: ... +from .core import PathLike, PathSet, TypeSpec, DataValue +from . import typespec_utils +from .defaults import DEFAULT_REGISTRY as default_registry + +Packet = dict[str, str] +PacketLike = Packet + + +__all__ = [ + "TypeSpec", + "PathLike", + "PathSet", + "typespec_utils", + "DataValue", + "default_registry", +] diff --git a/src/orcapod/types/arrow_utils.py b/src/orcapod/types/arrow_utils.py new file mode 100644 index 0000000..34a06a3 --- /dev/null +++ b/src/orcapod/types/arrow_utils.py @@ -0,0 +1,123 @@ +# from collections.abc import Mapping, Collection +# import pyarrow as pa +# from typing import Any + + +# def join_arrow_schemas(*schemas: pa.Schema) -> pa.Schema: +# """Join multiple Arrow schemas into a single schema, ensuring compatibility of fields. In particular, +# no field names should collide.""" +# merged_fields = [] +# for schema in schemas: +# merged_fields.extend(schema) +# return pa.schema(merged_fields) + + +# def split_by_column_groups( +# self, *column_groups: Collection[str] +# ) -> tuple[pa.Table | None]: +# """ +# Split the table into multiple tables based on the provided column groups. +# Each group is a collection of column names that should be included in the same table. +# The remaining columns that are not part of any group will be returned as the first table/None. 
+# """ +# if not column_groups: +# return (self,) + +# tables = [] +# remaining_columns = set(self.column_names) + +# for group in column_groups: +# group_columns = [col for col in group if col in remaining_columns] +# if group_columns: +# tables.append(self.select(group_columns)) +# remaining_columns.difference_update(group_columns) +# else: +# tables.append(None) + +# remaining_table = None +# if remaining_columns: +# orderd_remaining_columns = self.column_names +# remaining_columns = [ +# col for col in orderd_remaining_columns if col in remaining_columns +# ] +# remaining_table = self.select(orderd_remaining_columns) +# return (remaining_table, *tables) + + +# def prepare_prefixed_columns( +# table: pa.Table, +# prefix_group: Collection[str] | Mapping[str, Any | None], +# ) -> tuple[pa.Table, pa.Table]: +# """ """ +# if isinstance(prefix_group, Mapping): +# prefix_group = {k: v if v is not None else {} for k, v in prefix_group.items()} +# elif isinstance(prefix_group, Collection): +# prefix_group = {name: {} for name in prefix_group} +# else: +# raise TypeError( +# "prefix_group must be a Collection of strings or a Mapping of string to string or None." +# ) + +# # Visit each prefix group and split them into separate tables +# member_columns = {} + +# for col_name in table.column_names: +# for prefix in prefix_group: +# if col_name.startswith(prefix): +# # Remove the prefix from the column name +# base_name = col_name.removeprefix(prefix) +# if base_name not in member_columns: +# member_columns[base_name] = [] +# member_columns[base_name].append(table.column(col_name)) + +# data_columns = [] +# data_column_names = [] +# existing_source_info = {} + +# for i, name in enumerate(table.column_names): +# if name.startswith(SOURCE_INFO_PREFIX): +# # Extract the base column name +# base_name = name.removeprefix(SOURCE_INFO_PREFIX) +# existing_source_info[base_name] = table.column(i) +# else: +# data_columns.append(table.column(i)) +# data_column_names.append(name) + +# # Step 2: Create source_info columns for each regular column +# source_info_columns = [] +# source_info_column_names = [] + +# # Create source_info columns for each regular column +# num_rows = table.num_rows + +# for col_name in data_column_names: +# source_info_col_name = f"{SOURCE_INFO_PREFIX}{col_name}" + +# # if col_name is in source_info, use that value +# if col_name in source_info: +# # Use value from source_info dictionary +# source_value = source_info[col_name] +# source_values = pa.array([source_value] * num_rows, type=pa.large_string()) +# # if col_name is in existing_source_info, use that column +# elif col_name in existing_source_info: +# # Use existing source_info column, but convert to large_string +# existing_col = existing_source_info[col_name] +# if existing_col.type == pa.large_string(): +# source_values = existing_col +# else: +# # Convert to large_string +# source_values = pa.compute.cast(existing_col, pa.large_string()) # type: ignore + +# else: +# # Use null values +# source_values = pa.array([None] * num_rows, type=pa.large_string()) + +# source_info_columns.append(source_values) +# source_info_column_names.append(source_info_col_name) + +# # Step 3: Create the final table +# data_table: pa.Table = pa.Table.from_arrays(data_columns, names=data_column_names) +# source_info_table: pa.Table = pa.Table.from_arrays( +# source_info_columns, names=source_info_column_names +# ) +# return data_table, source_info_table diff --git a/src/orcapod/types/core.py b/src/orcapod/types/core.py index 5822f87..b43d21a 100644 
--- a/src/orcapod/types/core.py +++ b/src/orcapod/types/core.py @@ -1,17 +1,10 @@ -from typing import Protocol, Any, TypeAlias, Mapping -import pyarrow as pa -from dataclasses import dataclass +from typing import Protocol, Any, TypeAlias +import os +from collections.abc import Collection, Mapping +import logging -# TODO: reconsider the need for this dataclass as its information is superfluous -# to the registration of the handler into the registry. -@dataclass -class TypeInfo: - python_type: type - arrow_type: pa.DataType - semantic_type: str | None # name under which the type is registered - handler: "TypeHandler" - +logger = logging.getLogger(__name__) DataType: TypeAlias = type @@ -19,39 +12,23 @@ class TypeInfo: str, DataType ] # Mapping of parameter names to their types +# Convenience alias for anything pathlike +PathLike = str | os.PathLike -class TypeHandler(Protocol): - """Protocol for handling conversion between Python types and underlying Arrow - data types used for storage. - - The handler itself IS the definition of a semantic type. The semantic type - name/identifier is provided by the registerer when registering the handler. - - TypeHandlers should clearly communicate what Python types they can handle, - and focus purely on conversion logic. - """ - - def python_types(self) -> type | tuple[type, ...]: - """Return the Python type(s) this handler can process. +# an (optional) string or a collection of (optional) string values +# Note that TagValue can be nested, allowing for an arbitrary depth of nested lists +TagValue: TypeAlias = int | str | None | Collection["TagValue"] - Returns: - Single Type or tuple of Types this handler supports +# a pathset is a path or an arbitrary depth of nested list of paths +PathSet: TypeAlias = PathLike | Collection[PathLike | None] - Examples: - - PathHandler: return Path - - NumericHandler: return (int, float) - - CollectionHandler: return (list, tuple, set) - """ - ... +# Simple data types that we support (with clear Polars correspondence) +SupportedNativePythonData: TypeAlias = str | int | float | bool | bytes - def storage_type(self) -> pa.DataType: - """Return the Arrow DataType instance for schema definition.""" - ... +ExtendedSupportedPythonData: TypeAlias = SupportedNativePythonData | PathSet - def python_to_storage(self, value: Any) -> Any: - """Convert Python value to Arrow-compatible storage representation.""" - ... +# Extended data values that can be stored in packets +# Either the original PathSet or one of our supported simple data types +DataValue: TypeAlias = ExtendedSupportedPythonData | Collection["DataValue"] | None - def storage_to_python(self, value: Any) -> Any: - """Convert storage representation back to Python object.""" - ... 
+PacketLike: TypeAlias = Mapping[str, DataValue] diff --git a/src/orcapod/types/default.py b/src/orcapod/types/default.py deleted file mode 100644 index d41e577..0000000 --- a/src/orcapod/types/default.py +++ /dev/null @@ -1,18 +0,0 @@ -from .registry import TypeRegistry -from .handlers import ( - PathHandler, - UUIDHandler, - SimpleMappingHandler, - DateTimeHandler, -) -import pyarrow as pa - -# Create default registry and register handlers -default_registry = TypeRegistry() - -# Register with semantic names - registry extracts supported types automatically -default_registry.register("path", PathHandler()) -default_registry.register("uuid", UUIDHandler()) -default_registry.register( - "datetime", DateTimeHandler() -) # Registers for datetime, date, time diff --git a/src/orcapod/types/defaults.py b/src/orcapod/types/defaults.py new file mode 100644 index 0000000..f7b5773 --- /dev/null +++ b/src/orcapod/types/defaults.py @@ -0,0 +1,51 @@ +# A collection of versioned hashers that provide a "default" implementation of hashers. +from orcapod.utils.object_spec import parse_objectspec + + +from orcapod.types.semantic_types import ( + SemanticTypeRegistry, + SemanticType, + CanonicalPath, + PathlibPathConverter, + ArrowStringPathConverter, +) + +CURRENT_VERSION = "v0.1" + + +semantic_path_objectspec = { + "v0.1": { + "_class": "orcapod.types.semantic_types.SemanticType", + "_config": { + "name": "path", + "description": "File system path representation", + "python_converters": [ + { + "_class": "orcapod.types.semantic_types.PathlibPathConverter", + } + ], + "arrow_converters": [ + { + "_class": "orcapod.types.semantic_types.ArrowStringPathConverter", + } + ], + }, + } +} + +semantic_registry_objectspec = { + "v0.1": { + "_class": "orcapod.types.semantic_types.SemanticTypeRegistry", + "_config": {"semantic_types": [semantic_path_objectspec["v0.1"]]}, + } +} + + +SEMANTIC_PATH = SemanticType[CanonicalPath]( + "path", + "File system path representation", + python_converters=[PathlibPathConverter()], + arrow_converters=[ArrowStringPathConverter()], +) + +DEFAULT_REGISTRY = SemanticTypeRegistry([SEMANTIC_PATH]) diff --git a/src/orcapod/types/legacy/packets.py b/src/orcapod/types/legacy/packets.py new file mode 100644 index 0000000..7950d5b --- /dev/null +++ b/src/orcapod/types/legacy/packets.py @@ -0,0 +1,349 @@ +from orcapod.types.core import DataValue +from typing import TypeAlias, Any +from collections.abc import Mapping, Collection +from orcapod.types.core import TypeSpec, Tag, TypeHandler +from orcapod.types.legacy.semantic_type_registry import SemanticTypeRegistry +from orcapod.types import schemas +from orcapod.types.typespec_utils import get_typespec_from_dict +import pyarrow as pa + +# A conveniece packet-like type that defines a value that can be +# converted to a packet. It's broader than Packet and a simple mapping +# from string keys to DataValue (e.g., int, float, str) can be regarded +# as PacketLike, allowing for more flexible interfaces. +# Anything that requires Packet-like data but without the strict features +# of a Packet should accept PacketLike. +# One should be careful when using PacketLike as a return type as it does not +# enforce the typespec or source_info, which are important for packet integrity. 
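To make the PacketLike / Packet distinction described in the comment above concrete, a small sketch contrasting a plain mapping with the stricter Packet class defined just below. The import path follows the new module layout in this diff; the values and the inferred typespec shown in the comments are illustrative assumptions.

from orcapod.types.legacy.packets import Packet

# any str -> DataValue mapping already qualifies as PacketLike
loose = {"x": 1, "word": "hello"}

# a Packet additionally carries a typespec (inferred here) and per-key source_info
strict = Packet(loose, source_info={"x": "source_a", "word": None})
print(strict.typespec)         # inferred from the values, e.g. {'x': int, 'word': str}
print(strict.get_composite())  # data plus _source_info_x / _source_info_word entries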
+PacketLike: TypeAlias = Mapping[str, DataValue] + + +class Packet(dict[str, DataValue]): + def __init__( + self, + obj: PacketLike | None = None, + typespec: TypeSpec | None = None, + source_info: dict[str, str | None] | None = None, + ): + if obj is None: + obj = {} + super().__init__(obj) + if typespec is None: + typespec = get_typespec_from_dict(self) + self._typespec = typespec + if source_info is None: + source_info = {} + self._source_info = source_info + + @property + def typespec(self) -> TypeSpec: + # consider returning a copy for immutability + return self._typespec + + @property + def source_info(self) -> dict[str, str | None]: + return {key: self._source_info.get(key, None) for key in self.keys()} + + @source_info.setter + def source_info(self, source_info: Mapping[str, str | None]): + self._source_info = { + key: value for key, value in source_info.items() if value is not None + } + + def get_composite(self) -> PacketLike: + composite = self.copy() + for k, v in self.source_info.items(): + composite[f"_source_info_{k}"] = v + return composite + + def map_keys( + self, mapping: Mapping[str, str], drop_unmapped: bool = False + ) -> "Packet": + """ + Map the keys of the packet using the provided mapping. + + Args: + mapping: A dictionary mapping old keys to new keys. + + Returns: + A new Packet with keys mapped according to the provided mapping. + """ + if drop_unmapped: + new_content = {v: self[k] for k, v in mapping.items() if k in self} + new_typespec = { + v: self.typespec[k] for k, v in mapping.items() if k in self.typespec + } + new_source_info = { + v: self.source_info[k] + for k, v in mapping.items() + if k in self.source_info + } + else: + new_content = {mapping.get(k, k): v for k, v in self.items()} + new_typespec = {mapping.get(k, k): v for k, v in self.typespec.items()} + new_source_info = { + mapping.get(k, k): v for k, v in self.source_info.items() + } + + return Packet(new_content, typespec=new_typespec, source_info=new_source_info) + + def join(self, other: "Packet") -> "Packet": + """ + Join another packet to this one, merging their keys and values. + + Args: + other: Another Packet to join with this one. + + Returns: + A new Packet with keys and values from both packets. + """ + # make sure there is no key collision + if not set(self.keys()).isdisjoint(other.keys()): + raise ValueError( + f"Key collision detected: packets {self} and {other} have overlapping keys" + " and cannot be joined without losing information." + ) + + new_content = {**self, **other} + new_typespec = {**self.typespec, **other.typespec} + new_source_info = {**self.source_info, **other.source_info} + + return Packet(new_content, typespec=new_typespec, source_info=new_source_info) + + +# a batch is a tuple of a tag and a list of packets +Batch: TypeAlias = tuple[Tag, Collection[Packet]] + + +class SemanticPacket(dict[str, Any]): + """ + A packet that conforms to a semantic schema, mapping string keys to values. + + This is used to represent data packets in OrcaPod with semantic types. + + Attributes + ---------- + keys : str + The keys of the packet. + values : Any + The values corresponding to each key. 
+ + Examples + -------- + >>> packet = SemanticPacket(name='Alice', age=30) + >>> print(packet) + {'name': 'Alice', 'age': 30} + """ + + def __init__( + self, + *args, + semantic_schema: schemas.SemanticSchema | None = None, + source_info: dict[str, str | None] | None = None, + **kwargs, + ): + super().__init__(*args, **kwargs) + self.schema = semantic_schema + if source_info is None: + source_info = {} + self.source_info = source_info + + def get_composite(self) -> dict[str, Any]: + composite = self.copy() + for k, v in self.source_info.items(): + composite[f"_source_info_{k}"] = v + return composite + + +class PacketConverter: + def __init__( + self, + typespec: TypeSpec, + registry: SemanticTypeRegistry, + include_source_info: bool = True, + ): + self.typespec = typespec + self.registry = registry + + self.semantic_schema = schemas.from_typespec_to_semantic_schema( + typespec, registry + ) + + self.include_source_info = include_source_info + + self.arrow_schema = schemas.from_semantic_schema_to_arrow_schema( + self.semantic_schema, include_source_info=self.include_source_info + ) + + self.key_handlers: dict[str, TypeHandler] = {} + + self.expected_key_set = set(self.typespec.keys()) + + for key, (_, semantic_type) in self.semantic_schema.items(): + if semantic_type is None: + continue + handler = registry.get_handler_by_semantic_type(semantic_type) + if handler is None: + raise ValueError( + f"No handler found for semantic type '{semantic_type}' in key '{key}'" + ) + self.key_handlers[key] = handler + + def _check_key_consistency(self, keys): + """Check if the provided keys match the expected keys.""" + keys_set = set(keys) + if keys_set != self.expected_key_set: + missing_keys = self.expected_key_set - keys_set + extra_keys = keys_set - self.expected_key_set + error_parts = [] + if missing_keys: + error_parts.append(f"Missing keys: {missing_keys}") + if extra_keys: + error_parts.append(f"Extra keys: {extra_keys}") + + raise KeyError(f"Keys don't match expected keys. {'; '.join(error_parts)}") + + def from_python_packet_to_semantic_packet( + self, python_packet: PacketLike + ) -> SemanticPacket: + """Convert a Python packet to a semantic packet. + + Args: + python_packet: Dictionary mapping parameter names to Python values + + Returns: + Packet with values converted to semantic types + + Raises: + KeyError: If packet keys don't match the expected type_info keys + TypeError: If value type doesn't match expected type + ValueError: If conversion fails + """ + # Validate packet keys + semantic_packet = SemanticPacket( + python_packet, + semantic_schema=self.semantic_schema, + source_info=getattr(python_packet, "source_info", None), + ) + self._check_key_consistency(set(semantic_packet.keys())) + + # convert from storage to Python types for semantic types + for key, handler in self.key_handlers.items(): + try: + semantic_packet[key] = handler.python_to_storage(semantic_packet[key]) + except Exception as e: + raise ValueError(f"Failed to convert value for '{key}': {e}") from e + + return semantic_packet + + def from_python_packet_to_arrow_table(self, python_packet: PacketLike) -> pa.Table: + """Convert a Python packet to an Arrow table. 
+ + Args: + python_packet: Dictionary mapping parameter names to Python values + + Returns: + Arrow table representation of the packet + """ + semantic_packet = self.from_python_packet_to_semantic_packet(python_packet) + return self.from_semantic_packet_to_arrow_table(semantic_packet) + + def from_semantic_packet_to_arrow_table( + self, semantic_packet: SemanticPacket + ) -> pa.Table: + """Convert a semantic packet to an Arrow table. + + Args: + semantic_packet: SemanticPacket with values to convert + + Returns: + Arrow table representation of the packet + """ + if self.include_source_info: + return pa.Table.from_pylist( + [semantic_packet.get_composite()], schema=self.arrow_schema + ) + else: + return pa.Table.from_pylist([semantic_packet], schema=self.arrow_schema) + + def from_arrow_table_to_semantic_packets( + self, arrow_table: pa.Table + ) -> Collection[SemanticPacket]: + """Convert an Arrow table to a semantic packet. + + Args: + arrow_table: Arrow table representation of the packet + + Returns: + SemanticPacket with values converted from Arrow types + """ + # TODO: this is a crude check, implement more robust one to check that + # schema matches what's expected + if not arrow_table.schema.equals(self.arrow_schema): + raise ValueError("Arrow table schema does not match expected schema") + + semantic_packets_contents = arrow_table.to_pylist() + + semantic_packets = [] + for all_packet_content in semantic_packets_contents: + packet_content = { + k: v + for k, v in all_packet_content.items() + if k in self.expected_key_set + } + source_info = { + k.removeprefix("_source_info_"): v + for k, v in all_packet_content.items() + if k.startswith("_source_info_") + } + semantic_packets.append( + SemanticPacket( + packet_content, + semantic_schema=self.semantic_schema, + source_info=source_info, + ) + ) + + return semantic_packets + + def from_semantic_packet_to_python_packet( + self, semantic_packet: SemanticPacket + ) -> Packet: + """Convert a semantic packet to a Python packet. + + Args: + semantic_packet: SemanticPacket with values to convert + + Returns: + Python packet representation of the semantic packet + """ + # Validate packet keys + python_packet = Packet( + semantic_packet, + typespec=self.typespec, + source_info=semantic_packet.source_info, + ) + packet_keys = set(python_packet.keys()) + self._check_key_consistency(packet_keys) + + for key, handler in self.key_handlers.items(): + try: + python_packet[key] = handler.storage_to_python(python_packet[key]) + except Exception as e: + raise ValueError(f"Failed to convert value for '{key}': {e}") from e + + return python_packet + + def from_arrow_table_to_python_packets(self, arrow_table: pa.Table) -> list[Packet]: + """Convert an Arrow table to a list of Python packets. 
+ + Args: + arrow_table: Arrow table representation of the packets + + Returns: + List of Python packets converted from the Arrow table + """ + semantic_packets = self.from_arrow_table_to_semantic_packets(arrow_table) + return [ + self.from_semantic_packet_to_python_packet(sp) for sp in semantic_packets + ] diff --git a/src/orcapod/types/handlers.py b/src/orcapod/types/legacy/semantic_type_handlers.py similarity index 59% rename from src/orcapod/types/handlers.py rename to src/orcapod/types/legacy/semantic_type_handlers.py index ecbdfba..b3bc70c 100644 --- a/src/orcapod/types/handlers.py +++ b/src/orcapod/types/legacy/semantic_type_handlers.py @@ -9,11 +9,11 @@ class PathHandler: """Handler for pathlib.Path objects, stored as strings.""" - def python_types(self) -> type: + def python_type(self) -> type: return Path - def storage_type(self) -> pa.DataType: - return pa.string() + def storage_type(self) -> type: + return str def python_to_storage(self, value: Path) -> str: return str(value) @@ -25,11 +25,11 @@ def storage_to_python(self, value: str) -> Path | None: class UUIDHandler: """Handler for UUID objects, stored as strings.""" - def python_types(self) -> type: + def python_type(self) -> type: return UUID - def storage_type(self) -> pa.DataType: - return pa.string() + def storage_type(self) -> type: + return str def python_to_storage(self, value: UUID) -> str: return str(value) @@ -41,11 +41,11 @@ def storage_to_python(self, value: str) -> UUID | None: class DecimalHandler: """Handler for Decimal objects, stored as strings.""" - def python_types(self) -> type: + def python_type(self) -> type: return Decimal - def storage_type(self) -> pa.DataType: - return pa.string() + def storage_type(self) -> type: + return str def python_to_storage(self, value: Decimal) -> str: return str(value) @@ -57,34 +57,14 @@ def storage_to_python(self, value: str) -> Decimal | None: class SimpleMappingHandler: """Handler for basic types that map directly to Arrow.""" - def __init__(self, python_type: type, arrow_type: pa.DataType): + def __init__(self, python_type: type): self._python_type = python_type - self._arrow_type = arrow_type - def python_types(self) -> type: + def python_type(self) -> type: return self._python_type - def storage_type(self) -> pa.DataType: - return self._arrow_type - - def python_to_storage(self, value: Any) -> Any: - return value # Direct mapping - - def storage_to_python(self, value: Any) -> Any: - return value # Direct mapping - - -class DirectArrowHandler: - """Handler for types that map directly to Arrow without conversion.""" - - def __init__(self, arrow_type: pa.DataType): - self._arrow_type = arrow_type - - def python_types(self) -> type: - return self._arrow_type - - def storage_type(self) -> pa.DataType: - return self._arrow_type + def storage_type(self) -> type: + return self._python_type def python_to_storage(self, value: Any) -> Any: return value # Direct mapping @@ -96,11 +76,11 @@ def storage_to_python(self, value: Any) -> Any: class DateTimeHandler: """Handler for datetime objects.""" - def python_types(self) -> tuple[type, ...]: - return (datetime, date, time) # Handles multiple related types + def python_type(self) -> type: + return datetime - def storage_type(self) -> pa.DataType: - return pa.timestamp("us") # Store everything as timestamp + def storage_type(self) -> type: + return datetime def python_to_storage(self, value: datetime | date | time) -> Any: if isinstance(value, datetime): diff --git a/src/orcapod/types/legacy/semantic_type_registry.py 
b/src/orcapod/types/legacy/semantic_type_registry.py new file mode 100644 index 0000000..6934bae --- /dev/null +++ b/src/orcapod/types/legacy/semantic_type_registry.py @@ -0,0 +1,461 @@ +import logging +import pyarrow as pa +from ..core import TypeHandler +from dataclasses import dataclass + +# This mapping is expected to be stable +# Be sure to test this assumption holds true +DEFAULT_ARROW_TYPE_LUT = { + int: pa.int64(), + float: pa.float64(), + str: pa.string(), + bool: pa.bool_(), +} + +logger = logging.getLogger(__name__) + + +# TODO: reconsider the need for this dataclass as its information is superfluous +# to the registration of the handler into the registry. +@dataclass +class TypeInfo: + python_type: type + storage_type: type + semantic_type: str | None # name under which the type is registered + handler: "TypeHandler" + + +class SemanticTypeRegistry: + """Registry that manages type handlers with semantic type names.""" + + def __init__(self): + self._handlers: dict[ + type, tuple[TypeHandler, str] + ] = {} # PythonType -> (Handler, semantic_name) + self._semantic_handlers: dict[str, TypeHandler] = {} # semantic_name -> Handler + self._semantic_to_python_lut: dict[ + str, type + ] = {} # semantic_name -> Python type + + def register( + self, + semantic_type: str, + handler: TypeHandler, + ): + """Register a handler with a semantic type name. + + Args: + semantic_name: Identifier for this semantic type (e.g., 'path', 'uuid') + handler: The type handler instance + explicit_types: Optional override of types to register for (if different from handler's supported_types) + override: If True, allow overriding existing registration for the same semantic name and Python type(s) + """ + # Determine which types to register for + + python_type = handler.python_type() + + # Register handler for each type + if python_type in self._handlers: + existing_semantic = self._handlers[python_type][1] + # TODO: handle overlapping registration more gracefully + raise ValueError( + f"Type {python_type} already registered with semantic type '{existing_semantic}'" + ) + + # Register by semantic name + if semantic_type in self._semantic_handlers: + raise ValueError(f"Semantic type '{semantic_type}' already registered") + + self._handlers[python_type] = (handler, semantic_type) + self._semantic_handlers[semantic_type] = handler + self._semantic_to_python_lut[semantic_type] = python_type + + def get_python_type(self, semantic_type: str) -> type | None: + """Get Python type for a semantic type.""" + return self._semantic_to_python_lut.get(semantic_type) + + def lookup_handler_info(self, python_type: type) -> tuple[TypeHandler, str] | None: + """Lookup handler info for a Python type.""" + for registered_type, (handler, semantic_type) in self._handlers.items(): + if issubclass(python_type, registered_type): + return (handler, semantic_type) + return None + + def get_semantic_type(self, python_type: type) -> str | None: + """Get semantic type for a Python type.""" + handler_info = self.lookup_handler_info(python_type) + return handler_info[1] if handler_info else None + + def get_handler(self, python_type: type) -> TypeHandler | None: + """Get handler for a Python type.""" + handler_info = self.lookup_handler_info(python_type) + return handler_info[0] if handler_info else None + + def get_handler_by_semantic_type(self, semantic_type: str) -> TypeHandler | None: + """Get handler by semantic type.""" + return self._semantic_handlers.get(semantic_type) + + def get_type_info(self, python_type: type) -> TypeInfo | 
None: + """Get TypeInfo for a Python type.""" + handler = self.get_handler(python_type) + if handler is None: + return None + semantic_type = self.get_semantic_type(python_type) + return TypeInfo( + python_type=python_type, + storage_type=handler.storage_type(), + semantic_type=semantic_type, + handler=handler, + ) + + def __contains__(self, python_type: type) -> bool: + """Check if a Python type is registered.""" + for registered_type in self._handlers: + if issubclass(python_type, registered_type): + return True + return False + + +# Below is a collection of functions that handles converting between various aspects of Python packets and Arrow tables. +# Here for convenience, any Python dictionary with str keys and supported Python values are referred to as a packet. + + +# Conversions are: +# python packet <-> storage packet <-> arrow table +# python typespec <-> storage typespec <-> arrow schema +# +# python packet <-> storage packet requires the use of SemanticTypeRegistry +# conversion between storage packet <-> arrow table requires info about semantic_type + + +# # Storage packet <-> Arrow table + +# def stroage_typespec_to_arrow_schema(storage_typespec:TypeSpec, semantic_type_info: dict[str, str]|None = None) -> pa.Schema: +# """Convert storage typespec to Arrow Schema with semantic_type metadata.""" +# """Convert storage typespec to PyArrow Schema with semantic_type metadata.""" +# if semantic_type_info is None: +# semantic_type_info = {} + +# fields = [] +# for field_name, field_type in storage_typespec.items(): +# arrow_type = python_to_pyarrow_type(field_type) +# semantic_type = semantic_type_info.get(field_name, None) +# field_metadata = {"semantic_type": semantic_type} if semantic_type else {} +# fields.append(pa.field(field_name, arrow_type, metadata=field_metadata)) +# return pa.schema(fields) + +# def arrow_schema_to_storage_typespec(schema: pa.Schema) -> tuple[TypeSpec, dict[str, str]|None]: +# """Convert Arrow Schema to storage typespec and semantic type metadata.""" +# typespec = {} +# semantic_type_info = {} + +# for field in schema: +# field_type = field.type +# typespec[field.name] = field_type.to_pandas_dtype() # Convert Arrow type to Pandas dtype +# if field.metadata and b"semantic_type" in field.metadata: +# semantic_type_info[field.name] = field.metadata[b"semantic_type"].decode("utf-8") + +# return typespec, semantic_type_info + + +# def storage_packet_to_arrow_table( +# storage_packet: PacketLike, +# typespec: TypeSpec | None = None, +# semantic_type_info: dict[str, str] | None = None, + + +# # TypeSpec + TypeRegistry + ArrowLUT -> Arrow Schema (annotated with semantic_type) + +# # + + +# # TypeSpec <-> Arrow Schema + +# def schema_from_typespec(typespec: TypeSpec, registry: SemanticTypeRegistry, metadata_info: dict | None = None) -> pa.Schema: +# """Convert TypeSpec to PyArrow Schema.""" +# if metadata_info is None: +# metadata_info = {} + +# fields = [] +# for field_name, field_type in typespec.items(): +# type_info = registry.get_type_info(field_type) +# if type_info is None: +# raise ValueError(f"No type info registered for {field_type}") +# fields.append(pa.field(field_name, type_info.arrow_type, metadata={ +# "semantic_type": type_info.semantic_type +# })) +# return pa.schema(fields) + +# def create_schema_from_typespec( +# typespec: TypeSpec, +# registry: SemanticTypeRegistry, +# metadata_info: dict | None = None, +# arrow_type_lut: dict[type, pa.DataType] | None = None, +# ) -> tuple[list[tuple[str, TypeHandler]], pa.Schema]: +# if metadata_info is None: 
+# metadata_info = {} +# if arrow_type_lut is None: +# arrow_type_lut = DEFAULT_ARROW_TYPE_LUT + +# keys_with_handlers: list[tuple[str, TypeHandler]] = [] +# schema_fields = [] +# for key, python_type in typespec.items(): +# type_info = registry.get_type_info(python_type) + +# field_metadata = {} +# if type_info and type_info.semantic_type: +# field_metadata["semantic_type"] = type_info.semantic_type +# keys_with_handlers.append((key, type_info.handler)) +# arrow_type = type_info.arrow_type +# else: +# arrow_type = arrow_type_lut.get(python_type) +# if arrow_type is None: +# raise ValueError( +# f"Direct support for Python type {python_type} is not provided. Register a handler to work with {python_type}" +# ) + +# schema_fields.append(pa.field(key, arrow_type, metadata=field_metadata)) +# return keys_with_handlers, pa.schema(schema_fields) + + +# def arrow_table_to_packets( +# table: pa.Table, +# registry: SemanticTypeRegistry, +# ) -> list[Packet]: +# """Convert Arrow table to packet with field metadata. + +# Args: +# packet: Dictionary mapping parameter names to Python values + +# Returns: +# PyArrow Table with the packet data as a single row +# """ +# packets: list[Packet] = [] + +# # prepare converter for each field + +# def no_op(x) -> Any: +# return x + +# converter_lut = {} +# for field in table.schema: +# if field.metadata and b"semantic_type" in field.metadata: +# semantic_type = field.metadata[b"semantic_type"].decode("utf-8") +# if semantic_type: +# handler = registry.get_handler_by_semantic_name(semantic_type) +# if handler is None: +# raise ValueError( +# f"No handler registered for semantic type '{semantic_type}'" +# ) +# converter_lut[field.name] = handler.storage_to_python + +# # Create packets from the Arrow table +# # TODO: make this more efficient +# for row in range(table.num_rows): +# packet: Packet = Packet() +# for field in table.schema: +# value = table.column(field.name)[row].as_py() +# packet[field.name] = converter_lut.get(field.name, no_op)(value) +# packets.append(packet) + +# return packets + + +# def create_arrow_table_with_meta( +# storage_packet: dict[str, Any], type_info: dict[str, TypeInfo] +# ): +# """Create an Arrow table with metadata from a storage packet. + +# Args: +# storage_packet: Dictionary with values in storage format +# type_info: Dictionary mapping parameter names to TypeInfo objects + +# Returns: +# PyArrow Table with metadata +# """ +# schema_fields = [] +# for key, type_info_obj in type_info.items(): +# field_metadata = {} +# if type_info_obj.semantic_type: +# field_metadata["semantic_type"] = type_info_obj.semantic_type + +# field = pa.field(key, type_info_obj.arrow_type, metadata=field_metadata) +# schema_fields.append(field) + +# schema = pa.schema(schema_fields) + +# arrays = [] +# for field in schema: +# value = storage_packet[field.name] +# array = pa.array([value], type=field.type) +# arrays.append(array) + +# return pa.Table.from_arrays(arrays, schema=schema) + + +# def retrieve_storage_packet_from_arrow_with_meta( +# arrow_table: pa.Table, +# ) -> dict[str, Any]: +# """Retrieve storage packet from Arrow table with metadata. 
+ +# Args: +# arrow_table: PyArrow Table with metadata + +# Returns: +# Dictionary representing the storage packet +# """ +# storage_packet = {} +# for field in arrow_table.schema: +# # Extract value from Arrow array +# array = arrow_table.column(field.name) +# if array.num_chunks > 0: +# value = array.chunk(0).as_py()[0] # Get first value +# else: +# value = None # Handle empty arrays + +# storage_packet[field.name] = value + +# return storage_packet + +# def typespec_to_schema_with_metadata(typespec: TypeSpec, field_metadata: dict|None = None) -> pa.Schema: +# """Convert TypeSpec to PyArrow Schema""" +# fields = [] +# for field_name, field_type in typespec.items(): +# arrow_type = python_to_pyarrow_type(field_type) +# fields.append(pa.field(field_name, arrow_type)) +# return pa.schema(fields) + +# def python_to_pyarrow_type(python_type: type, strict:bool=True) -> pa.DataType: +# """Convert Python type (including generics) to PyArrow type""" +# # For anywhere we need to store str value, we use large_string as is done in Polars + +# # Handle basic types first +# basic_mapping = { +# int: pa.int64(), +# float: pa.float64(), +# str: pa.large_string(), +# bool: pa.bool_(), +# bytes: pa.binary(), +# } + +# if python_type in basic_mapping: +# return basic_mapping[python_type] + +# # Handle generic types +# origin = get_origin(python_type) +# args = get_args(python_type) + +# if origin is list: +# # Handle list[T] +# if args: +# element_type = python_to_pyarrow_type(args[0]) +# return pa.list_(element_type) +# else: +# return pa.list_(pa.large_string()) # default to list of strings + +# elif origin is dict: +# # Handle dict[K, V] - PyArrow uses map type +# if len(args) == 2: +# key_type = python_to_pyarrow_type(args[0]) +# value_type = python_to_pyarrow_type(args[1]) +# return pa.map_(key_type, value_type) +# else: +# # Otherwise default to using long string +# return pa.map_(pa.large_string(), pa.large_string()) + +# elif origin is UnionType: +# # Handle Optional[T] (Union[T, None]) +# if len(args) == 2 and type(None) in args: +# non_none_type = args[0] if args[1] is type(None) else args[1] +# return python_to_pyarrow_type(non_none_type) + +# # Default fallback +# if not strict: +# logger.warning(f"Unsupported type {python_type}, defaulting to large_string") +# return pa.large_string() +# else: +# raise TypeError(f"Unsupported type {python_type} for PyArrow conversion. " +# "Set strict=False to allow fallback to large_string.") + +# def arrow_to_dicts(table: pa.Table) -> list[dict[str, Any]]: +# """ +# Convert Arrow table to dictionary or list of dictionaries. +# Returns a list of dictionaries (one per row) with column names as keys. +# Args: +# table: PyArrow Table to convert +# Returns: +# A list of dictionaries for multi-row tables. +# """ +# if len(table) == 0: +# return [] + +# # Multiple rows: return list of dicts (one per row) +# return [ +# {col_name: table.column(col_name)[i].as_py() for col_name in table.column_names} +# for i in range(len(table)) +# ] + +# def get_metadata_from_schema( +# schema: pa.Schema, metadata_field: bytes +# ) -> dict[str, str]: +# """ +# Extract metadata from Arrow schema fields. Metadata value will be utf-8 decoded. 
+# Args: +# schema: PyArrow Schema to extract metadata from +# metadata_field: Metadata field to extract (e.g., b'semantic_type') +# Returns: +# Dictionary mapping field names to their metadata values +# """ +# metadata = {} +# for field in schema: +# if field.metadata and metadata_field in field.metadata: +# metadata[field.name] = field.metadata[metadata_field].decode("utf-8") +# return metadata + +# def dict_to_arrow_table_with_metadata(data: dict, data_type_info: TypeSpec | None = None, metadata: dict | None = None): +# """ +# Convert a tag dictionary to PyArrow table with metadata on each column. + +# Args: +# tag: Dictionary with string keys and any Python data type values +# metadata_key: The metadata key to add to each column +# metadata_value: The metadata value to indicate this column came from tag +# """ +# if metadata is None: +# metadata = {} + +# if field_types is None: +# # First create the table to infer types +# temp_table = pa.Table.from_pylist([data]) + +# # Create new fields with metadata +# fields_with_metadata = [] +# for field in temp_table.schema: +# # Add metadata to each field +# field_metadata = metadata +# new_field = pa.field( +# field.name, field.type, nullable=field.nullable, metadata=field_metadata +# ) +# fields_with_metadata.append(new_field) + +# # Create schema with metadata +# schema_with_metadata = pa.schema(fields_with_metadata) + +# # Create the final table with the metadata-enriched schema +# table = pa.Table.from_pylist([tag], schema=schema_with_metadata) + +# return table + + +# # def get_columns_with_metadata( +# # df: pl.DataFrame, key: str, value: str | None = None +# # ) -> list[str]: +# # """Get column names with specific metadata using list comprehension. If value is given, only +# # columns matching that specific value for the desginated metadata key will be returned. +# # Otherwise, all columns that contains the key as metadata will be returned regardless of the value""" +# # return [ +# # col_name +# # for col_name, dtype in df.schema.items() +# # if hasattr(dtype, "metadata") +# # and (value is None or getattr(dtype, "metadata") == value) +# # ] diff --git a/src/orcapod/types/registry.py b/src/orcapod/types/registry.py deleted file mode 100644 index 0dafda5..0000000 --- a/src/orcapod/types/registry.py +++ /dev/null @@ -1,439 +0,0 @@ -from collections.abc import Callable, Collection, Sequence -import logging -from optparse import Values -from typing import Any -import pyarrow as pa -from orcapod.types import Packet -from .core import TypeHandler, TypeInfo, TypeSpec - -# This mapping is expected to be stable -# Be sure to test this assumption holds true -DEFAULT_ARROW_TYPE_LUT = { - int: pa.int64(), - float: pa.float64(), - str: pa.string(), - bool: pa.bool_(), -} - -logger = logging.getLogger(__name__) - - -class TypeRegistry: - """Registry that manages type handlers with semantic type names.""" - - def __init__(self): - self._handlers: dict[ - type, tuple[TypeHandler, str] - ] = {} # Type -> (Handler, semantic_name) - self._semantic_handlers: dict[str, TypeHandler] = {} # semantic_name -> Handler - - def register( - self, - semantic_name: str, - handler: TypeHandler, - explicit_types: type | tuple[type, ...] | None = None, - override: bool = False, - ): - """Register a handler with a semantic type name. 
- - Args: - semantic_name: Identifier for this semantic type (e.g., 'path', 'uuid') - handler: The type handler instance - explicit_types: Optional override of types to register for (if different from handler's supported_types) - override: If True, allow overriding existing registration for the same semantic name and Python type(s) - """ - # Determine which types to register for - if explicit_types is not None: - types_to_register = ( - explicit_types - if isinstance(explicit_types, tuple) - else (explicit_types,) - ) - else: - supported = handler.python_types() - types_to_register = ( - supported if isinstance(supported, tuple) else (supported,) - ) - - # Register handler for each type - for python_type in types_to_register: - if python_type in self._handlers and not override: - existing_semantic = self._handlers[python_type][1] - # TODO: handle overlapping registration more gracefully - raise ValueError( - f"Type {python_type} already registered with semantic type '{existing_semantic}'" - ) - - self._handlers[python_type] = (handler, semantic_name) - - # Register by semantic name - if semantic_name in self._semantic_handlers and not override: - raise ValueError(f"Semantic type '{semantic_name}' already registered") - - self._semantic_handlers[semantic_name] = handler - - def get_handler(self, python_type: type) -> TypeHandler | None: - """Get handler for a Python type.""" - handler_info = self._handlers.get(python_type) - return handler_info[0] if handler_info else None - - def get_semantic_name(self, python_type: type) -> str | None: - """Get semantic name for a Python type.""" - handler_info = self._handlers.get(python_type) - return handler_info[1] if handler_info else None - - def get_type_info(self, python_type: type) -> TypeInfo | None: - """Get TypeInfo for a Python type.""" - handler = self.get_handler(python_type) - if handler is None: - return None - semantic_name = self.get_semantic_name(python_type) - return TypeInfo( - python_type=python_type, - arrow_type=handler.storage_type(), - semantic_type=semantic_name, - handler=handler, - ) - - def get_handler_by_semantic_name(self, semantic_name: str) -> TypeHandler | None: - """Get handler by semantic name.""" - return self._semantic_handlers.get(semantic_name) - - def __contains__(self, python_type: type) -> bool: - """Check if a Python type is registered.""" - return python_type in self._handlers - - -class PacketConverter: - def __init__(self, python_type_spec: TypeSpec, registry: TypeRegistry): - self.python_type_spec = python_type_spec - self.registry = registry - - # Lookup handlers and type info for fast access - self.handlers: dict[str, TypeHandler] = {} - self.storage_type_info: dict[str, TypeInfo] = {} - - self.expected_key_set = set(python_type_spec.keys()) - - # prepare the corresponding arrow table schema with metadata - self.keys_with_handlers, self.schema = create_schema_from_python_type_info( - python_type_spec, registry - ) - - self.semantic_type_lut = get_metadata_from_schema(self.schema, b"semantic_type") - - def _check_key_consistency(self, keys): - """Check if the provided keys match the expected keys.""" - keys_set = set(keys) - if keys_set != self.expected_key_set: - missing_keys = self.expected_key_set - keys_set - extra_keys = keys_set - self.expected_key_set - error_parts = [] - if missing_keys: - error_parts.append(f"Missing keys: {missing_keys}") - if extra_keys: - error_parts.append(f"Extra keys: {extra_keys}") - - raise KeyError(f"Keys don't match expected keys. 
{'; '.join(error_parts)}") - - def _to_storage_packet(self, packet: Packet) -> dict[str, Any]: - """Convert packet to storage representation. - - Args: - packet: Dictionary mapping parameter names to Python values - - Returns: - Dictionary with same keys but values converted to storage format - - Raises: - KeyError: If packet keys don't match the expected type_info keys - TypeError: If value type doesn't match expected type - ValueError: If conversion fails - """ - # Validate packet keys - packet_keys = set(packet.keys()) - - self._check_key_consistency(packet_keys) - - # Convert each value - storage_packet: dict[str, Any] = ( - packet.copy() - ) # Start with a copy of the packet - - for key, handler in self.keys_with_handlers: - try: - storage_packet[key] = handler.python_to_storage(storage_packet[key]) - except Exception as e: - raise ValueError(f"Failed to convert value for '{key}': {e}") from e - - return storage_packet - - def _from_storage_packet(self, storage_packet: dict[str, Any]) -> Packet: - """Convert storage packet back to Python packet. - - Args: - storage_packet: Dictionary with values in storage format - - Returns: - Packet with values converted back to Python types - - Raises: - KeyError: If storage packet keys don't match the expected type_info keys - TypeError: If value type doesn't match expected type - ValueError: If conversion fails - """ - # Validate storage packet keys - storage_keys = set(storage_packet.keys()) - - self._check_key_consistency(storage_keys) - - # Convert each value back to Python type - packet: Packet = storage_packet.copy() - - for key, handler in self.keys_with_handlers: - try: - packet[key] = handler.storage_to_python(storage_packet[key]) - except Exception as e: - raise ValueError(f"Failed to convert value for '{key}': {e}") from e - - return packet - - def to_arrow_table(self, packet: Packet | Sequence[Packet]) -> pa.Table: - """Convert packet to PyArrow Table with field metadata. - - Args: - packet: Dictionary mapping parameter names to Python values - - Returns: - PyArrow Table with the packet data as a single row - """ - # Convert packet to storage format - if not isinstance(packet, Sequence): - packets = [packet] - else: - packets = packet - - storage_packets = [self._to_storage_packet(p) for p in packets] - - # Create arrays - arrays = [] - for field in self.schema: - values = [p[field.name] for p in storage_packets] - array = pa.array(values, type=field.type) - arrays.append(array) - - return pa.Table.from_arrays(arrays, schema=self.schema) - - def from_arrow_table( - self, table: pa.Table, verify_semantic_equivalence: bool = True - ) -> list[Packet]: - """Convert Arrow table to packet with field metadata. - - Args: - table: PyArrow Table with metadata - - Returns: - List of packets converted from the Arrow table - """ - # Check for consistency in the semantic type mapping: - semantic_type_info = get_metadata_from_schema(table.schema, b"semantic_type") - - if semantic_type_info != self.semantic_type_lut: - if not verify_semantic_equivalence: - logger.warning( - "Arrow table semantic types do not match expected type registry. " - f"Expected: {self.semantic_type_lut}, got: {semantic_type_info}" - ) - else: - raise ValueError( - "Arrow table semantic types do not match expected type registry. 
" - f"Expected: {self.semantic_type_lut}, got: {semantic_type_info}" - ) - - # Create packets from the Arrow table - # TODO: make this more efficient - storage_packets: list[Packet] = arrow_to_dicts(table) # type: ignore - if not self.keys_with_handlers: - # no special handling required - return storage_packets - - return [self._from_storage_packet(packet) for packet in storage_packets] - - -def arrow_to_dicts(table: pa.Table) -> list[dict[str, Any]]: - """ - Convert Arrow table to dictionary or list of dictionaries. - By default returns a list of dictionaries (one per row) with column names as keys. - If `collapse_singleton` is True, return a single dictionary for single-row tables. - Args: - table: PyArrow Table to convert - collapse_singleton: If True, return a single dictionary for single-row tables. Defaults to False. - Returns: - A dictionary if singleton and collapse_singleton=True. Otherwise, list of dictionaries for multi-row tables. - """ - if len(table) == 0: - return [] - - # Multiple rows: return list of dicts (one per row) - return [ - {col_name: table.column(col_name)[i].as_py() for col_name in table.column_names} - for i in range(len(table)) - ] - - -def get_metadata_from_schema( - schema: pa.Schema, metadata_field: bytes -) -> dict[str, str]: - """ - Extract metadata from Arrow schema fields. Metadata value will be utf-8 decoded. - Args: - schema: PyArrow Schema to extract metadata from - metadata_field: Metadata field to extract (e.g., b'semantic_type') - Returns: - Dictionary mapping field names to their metadata values - """ - metadata = {} - for field in schema: - if field.metadata and metadata_field in field.metadata: - metadata[field.name] = field.metadata[metadata_field].decode("utf-8") - return metadata - - -def create_schema_from_python_type_info( - python_type_spec: TypeSpec, - registry: TypeRegistry, - arrow_type_lut: dict[type, pa.DataType] | None = None, -) -> tuple[list[tuple[str, TypeHandler]], pa.Schema]: - if arrow_type_lut is None: - arrow_type_lut = DEFAULT_ARROW_TYPE_LUT - keys_with_handlers: list[tuple[str, TypeHandler]] = [] - schema_fields = [] - for key, python_type in python_type_spec.items(): - type_info = registry.get_type_info(python_type) - - field_metadata = {} - if type_info and type_info.semantic_type: - field_metadata["semantic_type"] = type_info.semantic_type - keys_with_handlers.append((key, type_info.handler)) - arrow_type = type_info.arrow_type - else: - arrow_type = arrow_type_lut.get(python_type) - if arrow_type is None: - raise ValueError( - f"Direct support for Python type {python_type} is not provided. Register a handler to work with {python_type}" - ) - - schema_fields.append(pa.field(key, arrow_type, metadata=field_metadata)) - return keys_with_handlers, pa.schema(schema_fields) - - -def arrow_table_to_packets( - table: pa.Table, - registry: TypeRegistry, -) -> list[Packet]: - """Convert Arrow table to packet with field metadata. 
- - Args: - packet: Dictionary mapping parameter names to Python values - - Returns: - PyArrow Table with the packet data as a single row - """ - packets: list[Packet] = [] - - # prepare converter for each field - - def no_op(x) -> Any: - return x - - converter_lut = {} - for field in table.schema: - if field.metadata and b"semantic_type" in field.metadata: - semantic_type = field.metadata[b"semantic_type"].decode("utf-8") - if semantic_type: - handler = registry.get_handler_by_semantic_name(semantic_type) - if handler is None: - raise ValueError( - f"No handler registered for semantic type '{semantic_type}'" - ) - converter_lut[field.name] = handler.storage_to_python - - # Create packets from the Arrow table - # TODO: make this more efficient - for row in range(table.num_rows): - packet: Packet = {} - for field in table.schema: - value = table.column(field.name)[row].as_py() - packet[field.name] = converter_lut.get(field.name, no_op)(value) - packets.append(packet) - - return packets - - -def is_packet_supported( - python_type_info: TypeSpec, registry: TypeRegistry, type_lut: dict | None = None -) -> bool: - """Check if all types in the packet are supported by the registry or known to the default lut.""" - if type_lut is None: - type_lut = {} - return all( - python_type in registry or python_type in type_lut - for python_type in python_type_info.values() - ) - - -def create_arrow_table_with_meta( - storage_packet: dict[str, Any], type_info: dict[str, TypeInfo] -): - """Create an Arrow table with metadata from a storage packet. - - Args: - storage_packet: Dictionary with values in storage format - type_info: Dictionary mapping parameter names to TypeInfo objects - - Returns: - PyArrow Table with metadata - """ - schema_fields = [] - for key, type_info_obj in type_info.items(): - field_metadata = {} - if type_info_obj.semantic_type: - field_metadata["semantic_type"] = type_info_obj.semantic_type - - field = pa.field(key, type_info_obj.arrow_type, metadata=field_metadata) - schema_fields.append(field) - - schema = pa.schema(schema_fields) - - arrays = [] - for field in schema: - value = storage_packet[field.name] - array = pa.array([value], type=field.type) - arrays.append(array) - - return pa.Table.from_arrays(arrays, schema=schema) - - -def retrieve_storage_packet_from_arrow_with_meta( - arrow_table: pa.Table, -) -> dict[str, Any]: - """Retrieve storage packet from Arrow table with metadata. 
- - Args: - arrow_table: PyArrow Table with metadata - - Returns: - Dictionary representing the storage packet - """ - storage_packet = {} - for field in arrow_table.schema: - # Extract value from Arrow array - array = arrow_table.column(field.name) - if array.num_chunks > 0: - value = array.chunk(0).as_py()[0] # Get first value - else: - value = None # Handle empty arrays - - storage_packet[field.name] = value - - return storage_packet diff --git a/src/orcapod/types/schemas.py b/src/orcapod/types/schemas.py new file mode 100644 index 0000000..57f0551 --- /dev/null +++ b/src/orcapod/types/schemas.py @@ -0,0 +1,357 @@ +from typing import Self +from orcapod.types.core import DataType, TypeSpec +from orcapod.types.semantic_types import ( + SemanticType, + SemanticTypeRegistry, + PythonArrowConverter, +) +import pyarrow as pa +import datetime + +# This mapping is expected to be stable +# Be sure to test this assumption holds true +DEFAULT_ARROW_TYPE_LUT = { + int: pa.int64(), + float: pa.float64(), + str: pa.large_string(), + bool: pa.bool_(), +} + + +def python_to_arrow_type(python_type: type) -> pa.DataType: + if python_type in DEFAULT_ARROW_TYPE_LUT: + return DEFAULT_ARROW_TYPE_LUT[python_type] + raise TypeError(f"Converstion of python type {python_type} is not supported yet") + + +def arrow_to_python_type(arrow_type: pa.DataType) -> type: + if pa.types.is_integer(arrow_type): + return int + elif pa.types.is_floating(arrow_type): + return float + elif pa.types.is_string(arrow_type) or pa.types.is_large_string(arrow_type): + return str + elif pa.types.is_boolean(arrow_type): + return bool + elif pa.types.is_date(arrow_type): + return datetime.date + elif pa.types.is_timestamp(arrow_type): + return datetime.datetime + elif pa.types.is_binary(arrow_type): + return bytes + else: + raise TypeError(f"Conversion of arrow type {arrow_type} is not supported") + + +class PythonSchema(dict[str, DataType]): + """ + A schema for Python data types, mapping string keys to Python types. + + This is used to define the expected structure of data packets in OrcaPod. + + Attributes + ---------- + keys : str + The keys of the schema. + values : type + The types corresponding to each key. + + Examples + -------- + >>> schema = PythonSchema(name=str, age=int) + >>> print(schema) + {'name': , 'age': } + """ + + def copy(self) -> "PythonSchema": + return PythonSchema(self) + + def to_semantic_schema( + self, semantic_type_registry: SemanticTypeRegistry + ) -> "SemanticSchema": + """ + Convert the Python schema to a semantic schema using the provided semantic type registry. + + Parameters + ---------- + semantic_type_registry : SemanticTypeRegistry + The registry containing semantic type information. + + Returns + ------- + SemanticSchema + A new schema mapping keys to tuples of Python types and optional semantic type identifiers. + + Examples + -------- + >>> python_schema = PythonSchema(name=str, age=int) + >>> semantic_schema = python_schema.to_semantic_schema(registry) + >>> print(semantic_schema) + {'name': (str, None), 'age': (int, None)} + """ + return SemanticSchema.from_typespec(self, semantic_type_registry) + + def to_arrow_schema( + self, + semantic_type_registry: SemanticTypeRegistry | None = None, + converters: dict[str, PythonArrowConverter] | None = None, + ) -> pa.Schema: + """ + Convert the Python schema to an Arrow schema. + If converters are provided, they are used to convert the schema. Note that + no validation is performed on the converters, so they must be compatible with the schema. 
+ """ + if converters is not None: + # If converters are provided, use them to convert the schema + fields = [] + for field_name, python_type in self.items(): + if field_name in converters: + converter = converters[field_name] + arrow_type = converter.arrow_type + metadata = None + if converter.semantic_type_name is not None: + metadata = { + b"semantic_type": converter.semantic_type_name.encode( + "utf-8" + ) + } + else: + arrow_type = python_to_arrow_type(python_type) + metadata = None + fields.append(pa.field(field_name, arrow_type, metadata=metadata)) + return pa.schema(fields) + + if semantic_type_registry is None: + raise ValueError( + "semantic_type_registry must be provided if converters are not" + ) + # Otherwise, convert using the semantic type registry + return self.to_semantic_schema(semantic_type_registry).to_arrow_schema() + + @classmethod + def from_semantic_schema(cls, semantic_schema: "SemanticSchema") -> Self: + """ + Create a PythonSchema from a SemanticSchema. + + Parameters + ---------- + semantic_schema : SemanticSchema + The semantic schema to convert. + + Returns + ------- + PythonSchema + A new schema mapping keys to Python types. + """ + return cls(semantic_schema.get_python_types()) + + @classmethod + def from_arrow_schema( + cls, + arrow_schema: pa.Schema, + semantic_type_registry: SemanticTypeRegistry | None = None, + converters: dict[str, PythonArrowConverter] | None = None, + ) -> Self: + """ + Create a PythonSchema from an Arrow schema. + + Parameters + ---------- + arrow_schema : pa.Schema + The Arrow schema to convert. + semantic_type_registry : SemanticTypeRegistry + The registry containing semantic type information. + skip_system_columns : bool, optional + Whether to skip system columns (default is True). + converters : dict[str, PythonArrowConverter], optional + A dictionary of converters to use for converting the schema. If provided, the schema will be + converted using the converters. If not provided, the schema will be converted using the semantic type + registry. + + Returns + ------- + PythonSchema + A new schema mapping keys to Python types. + """ + if converters is not None: + # If converters are provided, use them to convert the schema + python_types = {} + for field in arrow_schema: + # TODO: consider performing validation of semantic type + if field.name in converters: + converter = converters[field.name] + python_types[field.name] = converter.python_type + else: + python_types[field.name] = arrow_to_python_type(field.type) + return cls(python_types) + + if semantic_type_registry is None: + raise ValueError( + "semantic_type_registry must be provided if converters are not" + ) + semantic_schema = SemanticSchema.from_arrow_schema( + arrow_schema, + semantic_type_registry, + ) + return cls(semantic_schema.get_python_types()) + + +class SemanticSchema(dict[str, type | SemanticType]): + """ + A schema for semantic types, mapping string keys to tuples of Python types and optional metadata. + + This is used to define the expected structure of data packets with semantic types in OrcaPod. + + Attributes + ---------- + keys : str + The keys of the schema. + values : type | SemanticType + Either type for simple fields or SemanticType for semantic fields. + + Examples + -------- + >>> schema = SemanticSchema(image=SemanticType('path'), age=int) + >>> print(schema) + {"image": SemanticType(name='path'), "age": })} + """ + + def get_semantic_fields(self) -> dict[str, SemanticType]: + """ + Get a dictionary of semantic fields in the schema. 
+ + Returns + ------- + dict[str, SemanticType] + A dictionary mapping keys to their corresponding SemanticType. + """ + return {k: v for k, v in self.items() if isinstance(v, SemanticType)} + + def get_python_types(self) -> dict[str, type]: + """ + Get the Python types for all keys in the schema. + + Returns + ------- + dict[str, type] + A dictionary mapping keys to their corresponding Python types. + """ + return { + k: v.get_default_python_type() if isinstance(v, SemanticType) else v + for k, v in self.items() + } + + def get_arrow_types(self) -> dict[str, tuple[pa.DataType, str | None]]: + """ + Get the Arrow types for all keys in the schema. + + Returns + ------- + dict[str, tuple[pa.DataType, str|None]] + A dictionary mapping keys to tuples of Arrow types. If the field has a semantic type, + the second element of the tuple is the semantic type name; otherwise, it is None. + """ + return { + k: (v.get_default_arrow_type(), v.name) + if isinstance(v, SemanticType) + else (python_to_arrow_type(v), None) + for k, v in self.items() + } + + def to_arrow_schema(self) -> pa.Schema: + """ + Get the Arrow schema, which is a PythonSchema representation of the semantic schema. + + Returns + ------- + PythonSchema + A new schema mapping keys to Python types. + """ + fields = [] + for k, (arrow_type, semantic_type_name) in self.get_arrow_types().items(): + if semantic_type_name is not None: + field = pa.field( + k, + arrow_type, + metadata={b"semantic_type": semantic_type_name.encode("utf-8")}, + ) + else: + field = pa.field(k, arrow_type) + fields.append(field) + + return pa.schema(fields) + + def to_python_schema(self) -> PythonSchema: + """ + Get the Python schema, which is a PythonSchema representation of the semantic schema. + + Returns + ------- + PythonSchema + A new schema mapping keys to Python types. + """ + return PythonSchema.from_semantic_schema(self) + + @classmethod + def from_arrow_schema( + cls, + arrow_schema: pa.Schema, + semantic_type_registry: SemanticTypeRegistry, + ) -> Self: + """ + Create a SemanticSchema from an Arrow schema. + + Parameters + ---------- + arrow_schema : pa.Schema + The Arrow schema to convert. + + Returns + ------- + SemanticSchema + A new schema mapping keys to tuples of Python types and optional semantic type identifiers. 
+ """ + + semantic_schema = {} + for field in arrow_schema: + field_type = None + if field.metadata is not None: + semantic_type_name = field.metadata.get(b"semantic_type", b"").decode() + if semantic_type_name: + semantic_type = semantic_type_registry.get_semantic_type( + semantic_type_name + ) + if semantic_type is None: + raise ValueError( + f"Semantic type '{semantic_type_name}' not found in registry" + ) + if not semantic_type.supports_arrow_type(field.type): + raise ValueError( + f"Semantic type '{semantic_type.name}' does not support Arrow field of type '{field.type}'" + ) + field_type = semantic_type + + if ( + field_type is None + ): # was not set to semantic type, so fallback to simple conversion + field_type = arrow_to_python_type(field.type) + + semantic_schema[field.name] = field_type + return cls(semantic_schema) + + @classmethod + def from_typespec( + cls, + typespec: TypeSpec, + semantic_type_registry: SemanticTypeRegistry, + ) -> Self: + semantic_schema = {} + for key, python_type in typespec.items(): + semantic_type = semantic_type_registry.get_semantic_type_for_python_type( + python_type + ) + if semantic_type is not None: + semantic_schema[key] = semantic_type + else: + semantic_schema[key] = python_type + return cls(semantic_schema) diff --git a/src/orcapod/types/semantic_converter.py b/src/orcapod/types/semantic_converter.py new file mode 100644 index 0000000..047ad2c --- /dev/null +++ b/src/orcapod/types/semantic_converter.py @@ -0,0 +1,135 @@ +from orcapod.types.semantic_types import PythonArrowConverter +from orcapod.types.schemas import PythonSchema, SemanticSchema +from orcapod.types import TypeSpec, typespec_utils as tsutils + +from typing import Any, Self +from collections.abc import Mapping +import pyarrow as pa +import logging + +logger = logging.getLogger(__name__) + + +class SemanticConverter: + @classmethod + def from_semantic_schema(cls, semantic_schema: SemanticSchema) -> Self: + converter_lut = {} + for ( + field, + semantic_type, + ) in semantic_schema.get_semantic_fields().items(): + converter_lut[field] = PythonArrowConverter.from_semantic_type( + semantic_type + ) + return cls(converter_lut) + + def __init__( + self, + converter_lut: dict[str, PythonArrowConverter], + ): + self._converter_lut = converter_lut + + def from_python_to_arrow_schema(self, python_schema: TypeSpec) -> pa.Schema: + """Convert a Python schema to an Arrow schema""" + return PythonSchema(python_schema).to_arrow_schema( + converters=self._converter_lut + ) + + def from_arrow_to_python_schema(self, arrow_schema: pa.Schema) -> PythonSchema: + """Convert an Arrow schema to a Python schema""" + return PythonSchema.from_arrow_schema( + arrow_schema, converters=self._converter_lut + ) + + def from_python_to_arrow( + self, python_data: Mapping[str, Any], python_schema: TypeSpec | None = None + ) -> pa.Table: + """Convert a dictionary of Python values to Arrow arrays""" + if python_schema is None: + # infer schema from data + python_schema = PythonSchema(tsutils.get_typespec_from_dict(python_data)) + logger.warning( + f"Inferred schema {python_schema} from Python data {python_data}. Note that this may not behave as expected." 
+ ) + + arrow_schema = self.from_python_to_arrow_schema(python_schema) + + arrow_data = {} + for field, value in python_data.items(): + if field in self._converter_lut: + converter = self._converter_lut[field] + arrow_data[field] = converter.from_python_to_arrow(value) + else: + arrow_data[field] = [value] + return pa.Table.from_pydict(arrow_data, schema=arrow_schema) + + def from_arrow_to_arrow_compat_dict( + self, arrow_data: pa.Table + ) -> list[dict[str, Any]]: + """Convert Arrow data to a dictionary of Python values""" + return arrow_data.to_pylist() + + def from_python_to_arrow_compat_dict( + self, python_data: Mapping[str, Any] + ) -> dict[str, Any]: + arrow_compat_dict = dict(python_data) + for field, converter in self._converter_lut.items(): + if field in python_data: + arrow_compat_dict[field] = converter.from_python_to_arrow( + python_data[field] + ) + return arrow_compat_dict + + def from_arrow_to_python(self, arrow_data: pa.Table) -> list[dict[str, Any]]: + """Convert a dictionary of Arrow arrays to Python values""" + + values = [] + for column_name in arrow_data.column_names: + column = arrow_data[column_name] + if column_name not in self._converter_lut: + values.append(column.to_pylist()) + else: + converter = self._converter_lut[column_name] + values.append(converter.from_arrow_to_python(column)) + all_entries = [] + + for entry in zip(*values): + assert len(entry) == len(arrow_data.column_names), ( + "Mismatch in number of columns and values" + ) + all_entries.append(dict(zip(arrow_data.column_names, entry))) + + return all_entries + + def as_dict(self) -> dict[str, PythonArrowConverter]: + """Return the converter lookup table as a dictionary.""" + return self._converter_lut.copy() + + def join(self, other: Self, strict: bool = False) -> Self: + """Join two SemanticConverters by merging their converter lookup tables.""" + if not isinstance(other, SemanticConverter): + raise TypeError("Can only join with another SemanticConverter.") + + new_converter_lut = self._converter_lut.copy() + for key, converter in other._converter_lut.items(): + if key in new_converter_lut: + if strict: + raise ValueError( + f"Key '{key}' already exists in the converter lookup table. Cannot overwrite in strict mode." + ) + logger.warning( + f"Key '{key}' already exists in the converter lookup table. Overwriting with new converter." + ) + new_converter_lut[key] = converter + + return self.__class__(new_converter_lut) + + def rename(self, column_mapping: Mapping[str, str]) -> Self: + """Rename columns in the converter lookup table.""" + new_converter_lut = {} + new_converter_lut = { + column_mapping.get(key, key): converter + for key, converter in self._converter_lut.items() + } + + return self.__class__(new_converter_lut) diff --git a/src/orcapod/types/semantic_types.py b/src/orcapod/types/semantic_types.py new file mode 100644 index 0000000..c0eaef2 --- /dev/null +++ b/src/orcapod/types/semantic_types.py @@ -0,0 +1,623 @@ +from typing import Any, Self, cast +from abc import ABC, abstractmethod +from dataclasses import dataclass +from pathlib import Path +import pyarrow as pa + +from collections.abc import Collection + + +# Converter interfaces using modern generics with ABC +class PythonConverter[T, R](ABC): + """ + Abstract base class for converters between canonical and Python representation types. 
+ T: canonical type, R: Python representation type + """ + + def __init__(self): + # Automatically infer types from inheritance + self._python_type = self._infer_python_type() + + def _infer_python_type(self) -> type[R]: + """Infer the Python type from __orig_bases__""" + for base in getattr(self.__class__, "__orig_bases__", []): + if hasattr(base, "__origin__") and issubclass( + base.__origin__, PythonConverter + ): + # Get the R type parameter (second argument) + args = getattr(base, "__args__", ()) + if len(args) >= 2: + return args[1] # R is the second type parameter + raise RuntimeError(f"Could not infer Python type for {self.__class__.__name__}") + + @abstractmethod + def to_canonical(self, value: R) -> T: + """Convert from Python representation to canonical form""" + pass + + @abstractmethod + def from_canonical(self, value: T) -> R: + """Convert from canonical to Python representation form""" + pass + + @abstractmethod + def can_handle(self, python_type: type) -> bool: ... + + def get_python_type(self) -> type[R]: + """Get the Python type this converter converts into (auto-inferred)""" + return self._python_type + + +class ArrowConverter[T](ABC): + """ + Abstract base class for converters between canonical and Arrow representation types. + T: canonical type + """ + + @abstractmethod + def to_canonical(self, value: pa.Array) -> list[T]: + """Convert from Arrow representation to canonical form""" + pass + + # @abstractmethod + # def from_canonical_to_arrow_compatible(self, value: T) -> Any: + # """Convert from canonical to Arrow-compatible representation""" + # pass + + # @abstractmethod + # def from_arrow_compatible_to_canonical(self, value: Any) -> T: + # """Convert from Arrow-compatible representation to canonical form""" + # pass + + @abstractmethod + def from_canonical(self, value: T | Collection[T]) -> pa.Array: + """Convert from canonical to Arrow representation""" + pass + + @abstractmethod + def can_handle(self, arrow_type: pa.DataType) -> bool: ... 
+ + @abstractmethod + def get_arrow_type(self) -> pa.DataType: + """Get the Arrow DataType this converter handles""" + pass + + +# Canonical types with explicit definitions +@dataclass(frozen=True) +class CanonicalPath: + """Canonical representation of a file system path""" + + path_str: str + is_absolute: bool = False + + def __str__(self) -> str: + return self.path_str + + def __post_init__(self) -> None: + if not self.path_str: + raise ValueError("Path string cannot be empty") + + +@dataclass(frozen=True) +class CanonicalTimestamp: + """Canonical representation of a timestamp""" + + timestamp: int + timezone: str = "UTC" + + def __post_init__(self) -> None: + if self.timestamp < 0: + raise ValueError("Timestamp cannot be negative") + + +@dataclass(frozen=True) +class CanonicalURL: + """Canonical representation of a URL""" + + url: str + scheme: str + host: str + + def __post_init__(self) -> None: + if not self.url.startswith(f"{self.scheme}://"): + raise ValueError(f"URL must start with {self.scheme}://") + + +# Python converters for Path +class PathlibPathConverter(PythonConverter[CanonicalPath, Path]): + """Converter for pathlib.Path objects""" + + def to_canonical(self, value: Path) -> CanonicalPath: + return CanonicalPath(path_str=str(value), is_absolute=value.is_absolute()) + + def from_canonical(self, value: CanonicalPath) -> Path: + return Path(value.path_str) + + def can_handle(self, python_type: type) -> bool: + return issubclass(python_type, Path) + + +# Arrow converters for Path +class ArrowStringPathConverter(ArrowConverter[CanonicalPath]): + """Converter for Arrow string representation of paths""" + + def to_canonical(self, value: pa.Array) -> list[CanonicalPath]: + return [ + CanonicalPath(v, is_absolute=Path(v).is_absolute()) + for v in value.to_pylist() + ] + + def from_canonical( + self, value: CanonicalPath | Collection[CanonicalPath] + ) -> pa.Array: + if isinstance(value, CanonicalPath): + value = [value] + return pa.array([v.path_str for v in value], type=pa.large_string()) + + def from_canonical_to_arrow_compatible(self, value: CanonicalPath) -> str: + return value.path_str + + def from_arrow_compatible_to_canonical(self, value: str) -> CanonicalPath: + return CanonicalPath(path_str=value, is_absolute=Path(value).is_absolute()) + + def can_handle(self, arrow_type: pa.DataType) -> bool: + return arrow_type == pa.large_string() + + def get_arrow_type(self) -> pa.DataType: + return pa.large_string() + + +# Enhanced SemanticType with explicit Python and Arrow handling +class SemanticType[T]: + """ + Represents a semantic type with explicit Python/Arrow converters. + + A SemanticType is a central concept that: + 1. Defines a canonical representation (T) for a domain concept + 2. Manages separate Python and Arrow converters + 3. Provides explicit methods for Python and Arrow operations + 4. Maintains type safety while allowing runtime discovery + + Type parameter T represents the canonical representation type. 
+ """ + + def __init__( + self, + name: str, + description: str = "", + python_converters: Collection[PythonConverter[T, Any]] | None = None, + arrow_converters: Collection[ArrowConverter[T]] | None = None, + ): + self.name = name + self.description = description + + self._python_type_converters: list[PythonConverter[T, Any]] = [] + self._arrow_type_converters: list[ArrowConverter[T]] = [] + + # Default converters + self._default_python_converter: PythonConverter[T, Any] | None = None + self._default_arrow_converter: ArrowConverter[T] | None = None + + if python_converters is not None: + for converter in python_converters: + self.register_python_converter( + converter, + set_default=self._default_python_converter is None, + force=False, + ) + + if arrow_converters is not None: + for converter in arrow_converters: + self.register_arrow_converter( + converter, + set_default=self._default_arrow_converter is None, + force=False, + ) + + def get_default_python_type(self) -> type[T]: + """Get the default Python type for this semantic type""" + if self._default_python_converter: + return self._default_python_converter.get_python_type() + raise ValueError( + f"No default Python converter registered for semantic type '{self.name}'" + ) + + def get_default_arrow_type(self) -> pa.DataType: + """Get the default Arrow DataType for this semantic type""" + if self._default_arrow_converter: + return self._default_arrow_converter.get_arrow_type() + raise ValueError( + f"No default Arrow converter registered for semantic type '{self.name}'" + ) + + def register_python_converter[R]( + self, + converter: PythonConverter[T, R], + set_default: bool = False, + force: bool = False, + ): + """ + Register a Python converter + """ + if converter not in self._python_type_converters: + self._python_type_converters.append(converter) + + if set_default: + if self._default_python_converter is not None and not force: + raise ValueError( + f"Default Python converter already set for semantic type '{self.name}'" + ) + self._default_python_converter = converter + + def register_arrow_converter( + self, + converter: ArrowConverter[T], + set_default: bool = False, + force: bool = False, + ) -> None: + """Register an Arrow converter""" + if converter not in self._arrow_type_converters: + self._arrow_type_converters.append(converter) + + if set_default: + if self._default_arrow_converter is not None and not force: + raise ValueError( + f"Default Arrow converter already set for semantic type '{self.name}'" + ) + self._default_arrow_converter = converter + + # Python-specific methods + def get_python_converter_for_type( + self, python_type: type + ) -> PythonConverter[T, Any] | None: + """Find a Python converter that can handle the given type""" + for converter in self._python_type_converters: + if converter.can_handle(python_type): + return converter + return None + + def get_arrow_converter_for_type( + self, arrow_type: pa.DataType + ) -> ArrowConverter[T] | None: + """Find an Arrow converter for the given Arrow DataType""" + for converter in self._arrow_type_converters: + if converter.can_handle(arrow_type): + return converter + return None + + def get_python_converter_with_output_type( + self, output_type: type + ) -> PythonConverter[T, Any] | None: + """Get a Python converter that can handle the specified output type""" + for converter in self._python_type_converters: + if issubclass(converter.get_python_type(), output_type): + return converter + return None + + def get_arrow_converter_with_output_type( + self, output_type: 
pa.DataType + ) -> ArrowConverter[T] | None: + for converter in self._arrow_type_converters: + if output_type == converter.get_arrow_type(): + return converter + return None + + def supports_python_type(self, python_type: type) -> bool: + return self.get_python_converter_for_type(python_type) is not None + + def supports_arrow_type(self, arrow_type: pa.DataType) -> bool: + return self.get_arrow_converter_for_type(arrow_type) is not None + + @property + def default_python_converter(self) -> PythonConverter[T, Any] | None: + """Get the default Python converter""" + return self._default_python_converter + + @property + def default_arrow_converter(self) -> ArrowConverter[T] | None: + return self._default_arrow_converter + + def to_canonical_from_python(self, value: Any) -> T: + """Convert Python value to canonical form""" + converter = self.get_python_converter_for_type(type(value)) + if not converter: + raise ValueError( + f"No Python converter found for {type(value)} in semantic type '{self.name}'" + ) + + return converter.to_canonical(value) + + def from_canonical_to_python( + self, value: T, target_type: type | None = None + ) -> Any: + """Convert from canonical to Python representation""" + if target_type is None: + converter = self.default_python_converter + if not converter: + raise ValueError( + f"No default Python converter for semantic type '{self.name}'" + ) + else: + converter = self.get_python_converter_for_type(target_type) + if not converter: + raise ValueError( + f"No converter found for target type '{target_type}' in semantic type '{self.name}'" + ) + + return converter.from_canonical(value) + + def to_canonical_from_arrow(self, value: pa.Array) -> list[T]: + """Convert Arrow value to canonical form using explicit Arrow DataType""" + converter = self.get_arrow_converter_for_type(value.type) + if not converter: + raise ValueError( + f"No Arrow converter found for type '{value.type}' in semantic type '{self.name}'" + ) + + canonical = converter.to_canonical(value) + + return canonical + + def from_canonical_to_arrow( + self, value: T, target_type: pa.DataType | None = None + ) -> pa.Array: + """Convert from canonical to Arrow representation using explicit Arrow DataType""" + + if target_type is None: + converter = self.default_arrow_converter + if not converter: + raise ValueError( + f"No default Arrow converter for semantic type '{self.name}'" + ) + else: + converter = self.get_arrow_converter_for_type(target_type) + if not converter: + raise ValueError( + f"No Arrow converter found for target type '{target_type}' in semantic type '{self.name}'" + ) + + return converter.from_canonical(value) + + def get_python_types(self) -> list[type]: + """Get all supported output Python DataTypes""" + return [ + converter.get_python_type() for converter in self._python_type_converters + ] + + def get_arrow_types(self) -> list[pa.DataType]: + """Get all supported output Arrow DataTypes""" + return [converter.get_arrow_type() for converter in self._arrow_type_converters] + + # Cross-system conversion methods + def convert_python_to_arrow( + self, python_value: Any, arrow_type: pa.DataType | None = None + ) -> Any: + """Convert directly from Python to Arrow representation""" + canonical = self.to_canonical_from_python(python_value) + return self.from_canonical_to_arrow(canonical, arrow_type) + + def convert_arrow_to_python( + self, arrow_value, python_type: type | None = None + ) -> list[Any]: + """Convert directly from Arrow to Python representation""" + canonical_values = 
self.to_canonical_from_arrow(arrow_value) + return [ + self.from_canonical_to_python(value, target_type=python_type) + for value in canonical_values + ] + + def __str__(self) -> str: + return f"SemanticType(name='{self.name}')" + + def __repr__(self) -> str: + python_count = len(self._python_type_converters) + arrow_count = len(self._arrow_type_converters) + return ( + f"SemanticType(name='{self.name}', " + f"python_converters={python_count}, " + f"arrow_converters={arrow_count})" + ) + + +# Registry with explicit Python and Arrow handling +class SemanticTypeRegistry: + """Registry that manages SemanticType objects with explicit Python/Arrow operations""" + + def __init__(self, semantic_types: Collection[SemanticType] | None = None): + self._semantic_type_lut: dict[str, SemanticType] = {} + self._python_to_semantic_lut: dict[type, SemanticType] = {} + if semantic_types is not None: + for semantic_type in semantic_types: + self.register_semantic_type(semantic_type) + + def register_semantic_type[T](self, semantic_type: SemanticType[T]): + """Register a semantic type""" + if semantic_type.name not in self._semantic_type_lut: + self._semantic_type_lut[semantic_type.name] = semantic_type + else: + raise ValueError( + f"Semantic type {self._semantic_type_lut[semantic_type.name]} is already registered for semantic name {semantic_type.name}" + ) + + python_type = semantic_type.get_default_python_type() + if python_type is None: + raise ValueError( + f"Semantic type {semantic_type.name} does not have a default Python type" + ) + if python_type in self._python_to_semantic_lut: + raise ValueError( + f"Python type {python_type} is already registered for semantic type {self._python_to_semantic_lut[python_type]}" + ) + self._python_to_semantic_lut[python_type] = semantic_type + + def get_semantic_type_for_python_type( + self, python_type: type + ) -> SemanticType | None: + """Get a semantic type by Python type""" + + # check if it's directly registered + semantic_type = self._python_to_semantic_lut.get(python_type) + if semantic_type is None: + # check if it's a subclass + for ( + registered_type, + registered_semantic_type, + ) in self._python_to_semantic_lut.items(): + if issubclass(python_type, registered_type): + return registered_semantic_type + return semantic_type + + def get_arrow_type_for_semantic_type( + self, semantic_type_name: str + ) -> pa.DataType | None: + """Get the default Arrow DataType for a semantic type by name""" + semantic_type = self._semantic_type_lut.get(semantic_type_name) + if semantic_type: + return semantic_type.get_default_arrow_type() + return None + + def get_arrow_type_for_python_type( + self, python_type: type + ) -> tuple[str | None, pa.DataType] | None: + """Get the default Arrow DataType for a Python type""" + semantic_type = self.get_semantic_type_for_python_type(python_type) + if semantic_type: + return semantic_type.name, semantic_type.get_default_arrow_type() + return None + + def from_python_to_arrow(self, python_value: Any) -> tuple[str | None, Any]: + """Convert a Python value to Arrow-targetting representation using the semantic type registry""" + semantic_type = self.get_semantic_type_for_python_type(type(python_value)) + if semantic_type: + return semantic_type.name, semantic_type.convert_python_to_arrow( + python_value + ) + return None, python_value + + def get_semantic_type(self, name: str) -> SemanticType | None: + """Get a semantic type by name""" + return self._semantic_type_lut.get(name) + + def list_semantic_types(self) -> list[SemanticType]: + 
"""Get all registered semantic types""" + return list(self._semantic_type_lut.values()) + + def registered_with_semantic_type(self, python_type: type) -> bool: + """Check if registry has the Python type registered with a semantic type""" + return python_type in self._python_to_semantic_lut + + def supports_semantic_and_arrow_type( + self, semantic_type_name: str, arrow_type: pa.DataType + ) -> bool: + """Check if registry supports the given semantic type and Arrow DataType combination""" + semantic_type = self._semantic_type_lut.get(semantic_type_name) + if not semantic_type: + return False + return semantic_type.supports_arrow_type(arrow_type) + + +# Type-safe wrapper for semantic values +class SemanticValue[T]: + """Type-safe wrapper for semantic values""" + + def __init__(self, value: T, semantic_type: SemanticType[T]): + self._value = value + self._semantic_type = semantic_type + + @property + def value(self) -> T: + return self._value + + @property + def semantic_type(self) -> SemanticType[T]: + return self._semantic_type + + def to_python(self) -> Any: + """Convert to Python representation""" + return self._semantic_type.from_canonical_to_python(self._value) + + def to_python_type(self, python_type: type) -> Any: + """Convert to Arrow representation using specific Arrow DataType""" + return self._semantic_type.from_canonical_to_arrow(self._value, python_type) + + def to_arrow(self) -> Any: + """Convert to Arrow representation using default dtype""" + return self._semantic_type.from_canonical_to_arrow(self._value) + + def to_arrow_with_type(self, arrow_type: pa.DataType) -> Any: + """Convert to Arrow representation using specific Arrow DataType""" + return self._semantic_type.from_canonical_to_arrow(self._value, arrow_type) + + @classmethod + def from_python(cls, python_value: Any, semantic_type: SemanticType[T]) -> Self: + """Create from a Python value""" + canonical = semantic_type.to_canonical_from_python(python_value) + return cls(canonical, semantic_type) + + @classmethod + def from_arrow(cls, arrow_value: Any, semantic_type: SemanticType[T]) -> Self: + """Create from an Arrow value with explicit Arrow DataType""" + canonical = semantic_type.to_canonical_from_arrow(arrow_value) + if len(canonical) != 1: + raise ValueError( + f"Expected single value from Arrow, got {len(canonical)} values" + ) + return cls(canonical[0], semantic_type) + + def __str__(self) -> str: + return f"SemanticValue({self._value}, {self._semantic_type.name})" + + def __repr__(self) -> str: + return f"SemanticValue(value={self._value!r}, semantic_type={self._semantic_type.name})" + + +class PythonArrowConverter[T, R]: + @classmethod + def from_semantic_type(cls, semantic_type: SemanticType[T]) -> Self: + """Create a PythonArrowConverter from a SemanticType""" + python_converter = semantic_type.default_python_converter + arrow_converter = semantic_type.default_arrow_converter + + if not python_converter or not arrow_converter: + raise ValueError( + f"Semantic type '{semantic_type.name}' does not have default converters" + ) + + return cls(python_converter, arrow_converter, semantic_type.name) + + def __init__( + self, + python_converter: PythonConverter[T, R], + arrow_converter: ArrowConverter[T], + semantic_type_name: str | None = None, + ): + self.python_converter = python_converter + self.arrow_converter = arrow_converter + self.semantic_type_name = semantic_type_name + + @property + def python_type(self) -> type[R]: + """Get the Python type this converter handles""" + return 
self.python_converter.get_python_type() + + @property + def arrow_type(self) -> pa.DataType: + """Get the Arrow DataType this converter handles""" + return self.arrow_converter.get_arrow_type() + + def from_python_to_arrow(self, python_value: R | Collection[R]) -> pa.Array: + """Convert from Python to Arrow representation""" + if isinstance(python_value, self.python_type): + python_value = [python_value] + assert isinstance(python_value, Collection), ( + "Expected a collection of values at this point" + ) + python_values = cast(Collection[R], python_value) + canonicals = [self.python_converter.to_canonical(val) for val in python_values] + return self.arrow_converter.from_canonical(canonicals) + + def from_arrow_to_python(self, arrow_value: pa.Array) -> list[R]: + """Convert from Arrow to Python representation""" + canonical = self.arrow_converter.to_canonical(arrow_value) + return [self.python_converter.from_canonical(value) for value in canonical] diff --git a/src/orcapod/types/inference.py b/src/orcapod/types/typespec_utils.py similarity index 70% rename from src/orcapod/types/inference.py rename to src/orcapod/types/typespec_utils.py index 2f18f39..609a6a0 100644 --- a/src/orcapod/types/inference.py +++ b/src/orcapod/types/typespec_utils.py @@ -1,20 +1,19 @@ -# Library of functions for inferring types for FunctionPod input and output parameters. +# Library of functions for working with TypeSpecs and for extracting TypeSpecs from a function's signature - -from collections.abc import Callable, Collection, Sequence -from typing import get_origin, get_args, TypeAlias -from .core import TypeSpec +from collections.abc import Callable, Collection, Sequence, Mapping +from typing import get_origin, get_args, Any +from orcapod.types.core import TypeSpec import inspect import logging - logger = logging.getLogger(__name__) def verify_against_typespec(packet: dict, typespec: TypeSpec) -> bool: """Verify that the dictionary's types match the expected types in the typespec.""" from beartype.door import is_bearable + # verify that packet contains no keys not in typespec if set(packet.keys()) - set(typespec.keys()): logger.warning( @@ -40,6 +39,7 @@ def check_typespec_compatibility( incoming_types: TypeSpec, receiving_types: TypeSpec ) -> bool: from beartype.door import is_subhint + for key, type_info in incoming_types.items(): if key not in receiving_types: logger.warning(f"Key '{key}' not found in parameter types.") @@ -52,11 +52,11 @@ def check_typespec_compatibility( return True -def extract_function_data_types( +def extract_function_typespecs( func: Callable, output_keys: Collection[str], - input_types: TypeSpec | None = None, - output_types: TypeSpec | Sequence[type] | None = None, + input_typespec: TypeSpec | None = None, + output_typespec: TypeSpec | Sequence[type] | None = None, ) -> tuple[TypeSpec, TypeSpec]: """ Extract input and output data types from a function signature. 
@@ -137,23 +137,23 @@ def extract_function_data_types( {'count': , 'total': , 'repr': } """ verified_output_types: TypeSpec = {} - if output_types is not None: - if isinstance(output_types, dict): - verified_output_types = output_types - elif isinstance(output_types, Sequence): + if output_typespec is not None: + if isinstance(output_typespec, dict): + verified_output_types = output_typespec + elif isinstance(output_typespec, Sequence): # If output_types is a collection, convert it to a dict with keys from return_keys - if len(output_types) != len(output_keys): + if len(output_typespec) != len(output_keys): raise ValueError( - f"Output types collection length {len(output_types)} does not match return keys length {len(output_keys)}." + f"Output types collection length {len(output_typespec)} does not match return keys length {len(output_keys)}." ) - verified_output_types = {k: v for k, v in zip(output_keys, output_types)} + verified_output_types = {k: v for k, v in zip(output_keys, output_typespec)} signature = inspect.signature(func) param_info: TypeSpec = {} for name, param in signature.parameters.items(): - if input_types and name in input_types: - param_info[name] = input_types[name] + if input_typespec and name in input_typespec: + param_info[name] = input_typespec[name] else: # check if the parameter has annotation if param.annotation is not inspect.Signature.empty: @@ -212,3 +212,90 @@ def extract_function_data_types( f"Type for return item '{key}' is not specified in output_types and has no type annotation in function signature." ) return param_info, inferred_output_types + + +def get_typespec_from_dict( + data: Mapping, typespec: TypeSpec | None = None, default=str +) -> TypeSpec: + """ + Returns a TypeSpec for the given dictionary. + The TypeSpec is a mapping from field name to Python type. If typespec is provided, then + it is used as a base when inferring types for the fields in dict + """ + if typespec is None: + typespec = {} + return { + key: typespec.get(key, type(value) if value is not None else default) + for key, value in data.items() + } + + +def get_compatible_type(type1: Any, type2: Any) -> Any: + if type1 is type2: + return type1 + if issubclass(type1, type2): + return type2 + if issubclass(type2, type1): + return type1 + raise TypeError(f"Types {type1} and {type2} are not compatible") + + +def union_typespecs(*typespecs: TypeSpec) -> TypeSpec: + # Merge the two TypeSpecs but raise an error if conflicts in types are found + merged = dict(typespecs[0]) + for typespec in typespecs[1:]: + for key, right_type in typespec.items(): + merged[key] = ( + get_compatible_type(merged[key], right_type) + if key in merged + else right_type + ) + return merged + + +def intersection_typespecs(*typespecs: TypeSpec) -> TypeSpec: + """ + Returns the intersection of all TypeSpecs, only returning keys that are present in all typespecs. + If a key is present in both TypeSpecs, the type must be the same. 
+ """ + + # Find common keys and ensure types match + + common_keys = set(typespecs[0].keys()) + for typespec in typespecs[1:]: + common_keys.intersection_update(typespec.keys()) + + intersection = {k: typespecs[0][k] for k in common_keys} + for typespec in typespecs[1:]: + for key in common_keys: + try: + intersection[key] = get_compatible_type( + intersection[key], typespec[key] + ) + except TypeError: + # If types are not compatible, raise an error + raise TypeError( + f"Type conflict for key '{key}': {intersection[key]} vs {typespec[key]}" + ) + return intersection + + +# def intersection_typespecs(left: TypeSpec, right: TypeSpec) -> TypeSpec: +# """ +# Returns the intersection of two TypeSpecs, only returning keys that are present in both. +# If a key is present in both TypeSpecs, the type must be the same. +# """ + +# # Find common keys and ensure types match +# common_keys = set(left.keys()).intersection(set(right.keys())) +# intersection = {} +# for key in common_keys: +# try: +# intersection[key] = get_compatible_type(left[key], right[key]) +# except TypeError: +# # If types are not compatible, raise an error +# raise TypeError( +# f"Type conflict for key '{key}': {left[key]} vs {right[key]}" +# ) + +# return intersection diff --git a/src/orcapod/types/utils.py b/src/orcapod/types/utils.py deleted file mode 100644 index 5393492..0000000 --- a/src/orcapod/types/utils.py +++ /dev/null @@ -1,62 +0,0 @@ -# TODO: move these functions to util -def escape_with_postfix(field: str, postfix=None, separator="_") -> str: - """ - Escape the field string by doubling separators and optionally append a postfix. - This function takes a field string and escapes any occurrences of the separator - by doubling them, then optionally appends a postfix with a separator prefix. - - Args: - field (str): The input string containing to be escaped. - postfix (str, optional): An optional postfix to append to the escaped string. - If None, no postfix is added. Defaults to None. - separator (str, optional): The separator character to escape and use for - prefixing the postfix. Defaults to "_". - Returns: - str: The escaped string with optional postfix. Returns empty string if - fields is provided but postfix is None. - Examples: - >>> escape_with_postfix("field1_field2", "suffix") - 'field1__field2_suffix' - >>> escape_with_postfix("name_age_city", "backup", "_") - 'name__age__city_backup' - >>> escape_with_postfix("data-info", "temp", "-") - 'data--info-temp' - >>> escape_with_postfix("simple", None) - 'simple' - >>> escape_with_postfix("no_separators", "end") - 'no__separators_end' - """ - - return field.replace(separator, separator * 2) + (f"_{postfix}" if postfix else "") - - -def unescape_with_postfix(field: str, separator="_") -> tuple[str, str | None]: - """ - Unescape a string by converting double separators back to single separators and extract postfix metadata. - This function reverses the escaping process where single separators were doubled to avoid - conflicts with metadata delimiters. It splits the input on double separators, then extracts - any postfix metadata from the last part. - - Args: - field (str): The escaped string containing doubled separators and optional postfix metadata - separator (str, optional): The separator character used for escaping. 
Defaults to "_" - Returns: - tuple[str, str | None]: A tuple containing: - - The unescaped string with single separators restored - - The postfix metadata if present, None otherwise - Examples: - >>> unescape_with_postfix("field1__field2__field3") - ('field1_field2_field3', None) - >>> unescape_with_postfix("field1__field2_metadata") - ('field1_field2', 'metadata') - >>> unescape_with_postfix("simple") - ('simple', None) - >>> unescape_with_postfix("field1--field2", separator="-") - ('field1-field2', None) - >>> unescape_with_postfix("field1--field2-meta", separator="-") - ('field1-field2', 'meta') - """ - - parts = field.split(separator * 2) - parts[-1], *meta = parts[-1].split("_", 1) - return separator.join(parts), meta[0] if meta else None diff --git a/src/orcapod/utils/arrow_utils.py b/src/orcapod/utils/arrow_utils.py new file mode 100644 index 0000000..700fa3e --- /dev/null +++ b/src/orcapod/utils/arrow_utils.py @@ -0,0 +1,277 @@ +# TODO: move this to a separate module + +from collections import defaultdict +import pyarrow as pa +from collections.abc import Mapping, Collection +from typing import Any + + +def join_arrow_schemas(*schemas: pa.Schema) -> pa.Schema: + """Join multiple Arrow schemas into a single schema, ensuring compatibility of fields. In particular, + no field names should collide.""" + merged_fields = [] + for schema in schemas: + merged_fields.extend(schema) + return pa.schema(merged_fields) + + +def hstack_tables(*tables: pa.Table) -> pa.Table: + """ + Horizontally stack multiple PyArrow tables by concatenating their columns. + + All input tables must have the same number of rows and unique column names. + + Args: + *tables: Variable number of PyArrow tables to stack horizontally + + Returns: + Combined PyArrow table with all columns from input tables + + Raises: + ValueError: If no tables provided, tables have different row counts, + or duplicate column names are found + """ + if len(tables) == 0: + raise ValueError("At least one table is required for horizontal stacking.") + if len(tables) == 1: + return tables[0] + + N = len(tables[0]) + for table in tables[1:]: + if len(table) != N: + raise ValueError( + "All tables must have the same number of rows for horizontal stacking." + ) + + # create combined schema + all_fields = [] + all_names = set() + for table in tables: + for field in table.schema: + if field.name in all_names: + raise ValueError( + f"Duplicate column name '{field.name}' found in input tables." + ) + all_fields.append(field) + all_names.add(field.name) + combined_schmea = pa.schema(all_fields) + + # create combined columns + all_columns = [] + for table in tables: + all_columns += table.columns + + return pa.Table.from_arrays(all_columns, schema=combined_schmea) + + +def check_arrow_schema_compatibility( + incoming_schema: pa.Schema, target_schema: pa.Schema, strict: bool = False +) -> tuple[bool, list[str]]: + # TODO: add strict comparison + """ + Check if incoming schema is compatible with current schema. + + Args: + incoming_schema: Schema to validate + target_schema: Expected schema to match against + strict: If True, requires exact match of field names and types. If False (default), + incoming_schema can have additional fields or different types as long as they are compatible. 
+ + Returns: + Tuple of (is_compatible, list_of_errors) + """ + errors = [] + + # Create lookup dictionaries for efficient access + incoming_fields = {field.name: field for field in incoming_schema} + target_fields = {field.name: field for field in target_schema} + + # Check each field in target_schema + for field_name, target_field in target_fields.items(): + if field_name not in incoming_fields: + errors.append(f"Missing field '{field_name}' in incoming schema") + continue + + incoming_field = incoming_fields[field_name] + + # Check data type compatibility + if not target_field.type.equals(incoming_field.type): + # TODO: if not strict, allow type coercion + errors.append( + f"Type mismatch for field '{field_name}': " + f"expected {target_field.type}, got {incoming_field.type}" + ) + + # Check semantic_type metadata if present in current schema + current_metadata = target_field.metadata or {} + incoming_metadata = incoming_field.metadata or {} + + if b"semantic_type" in current_metadata: + expected_semantic_type = current_metadata[b"semantic_type"] + + if b"semantic_type" not in incoming_metadata: + errors.append( + f"Missing 'semantic_type' metadata for field '{field_name}'" + ) + elif incoming_metadata[b"semantic_type"] != expected_semantic_type: + errors.append( + f"Semantic type mismatch for field '{field_name}': " + f"expected {expected_semantic_type.decode()}, " + f"got {incoming_metadata[b'semantic_type'].decode()}" + ) + elif b"semantic_type" in incoming_metadata: + errors.append( + f"Unexpected 'semantic_type' metadata for field '{field_name}': " + f"{incoming_metadata[b'semantic_type'].decode()}" + ) + + # If strict mode, check for additional fields in incoming schema + if strict: + for field_name in incoming_fields: + if field_name not in target_fields: + errors.append(f"Unexpected field '{field_name}' in incoming schema") + + return len(errors) == 0, errors + + +def split_by_column_groups( + table, + *column_groups: Collection[str], +) -> tuple[pa.Table | None, ...]: + """ + Split the table into multiple tables based on the provided column groups. + Each group is a collection of column names that should be included in the same table. + The remaining columns that are not part of any group will be returned as the first table/None. 
+ """ + if not column_groups: + return (table,) + + tables = [] + remaining_columns = set(table.column_names) + + for group in column_groups: + group_columns = [col for col in group if col in remaining_columns] + if group_columns: + tables.append(table.select(group_columns)) + remaining_columns.difference_update(group_columns) + else: + tables.append(None) + + remaining_table = None + if remaining_columns: + ordered_remaining_columns = [ + col for col in table.column_names if col in remaining_columns + ] + remaining_table = table.select(ordered_remaining_columns) + return (remaining_table, *tables) + + +def prepare_prefixed_columns( + table: pa.Table, + prefix_info: Collection[str] + | Mapping[str, Any | None] + | Mapping[str, Mapping[str, Any | None]], + exclude_columns: Collection[str] = (), + exclude_prefixes: Collection[str] = (), +) -> tuple[pa.Table, dict[str, pa.Table]]: + """ """ + all_prefix_info = {} + if isinstance(prefix_info, Mapping): + for prefix, info in prefix_info.items(): + if isinstance(info, Mapping): + all_prefix_info[prefix] = info + else: + all_prefix_info[prefix] = info + elif isinstance(prefix_info, Collection): + for prefix in prefix_info: + all_prefix_info[prefix] = {} + else: + raise TypeError( + "prefix_group must be a Collection of strings or a Mapping of string to string or None." + ) + + # split column into prefix groups + data_column_names = [] + data_columns = [] + existing_prefixed_columns = defaultdict(list) + + for col_name in table.column_names: + prefix_found = False + for prefix in all_prefix_info: + if col_name.startswith(prefix): + # Remove the prefix from the column name + base_name = col_name.removeprefix(prefix) + existing_prefixed_columns[prefix].append(base_name) + prefix_found = True + if not prefix_found: + # if no prefix found, consider this as a data column + data_column_names.append(col_name) + data_columns.append(table[col_name]) + + # Create source_info columns for each regular column + num_rows = table.num_rows + + prefixed_column_names = defaultdict(list) + prefixed_columns = defaultdict(list) + + target_column_names = [ + c + for c in data_column_names + if not any(c.startswith(prefix) for prefix in exclude_prefixes) + and c not in exclude_columns + ] + + for prefix, value_lut in all_prefix_info.items(): + target_prefixed_column_names = prefixed_column_names[prefix] + target_prefixed_columns = prefixed_columns[prefix] + + for col_name in target_column_names: + prefixed_col_name = f"{prefix}{col_name}" + existing_columns = existing_prefixed_columns[prefix] + + if isinstance(value_lut, Mapping): + value = value_lut.get(col_name) + else: + value = value_lut + + if value is not None: + # Use value from source_info dictionary + column_values = pa.array([value] * num_rows, type=pa.large_string()) + # if col_name is in existing_source_info, use that column + elif col_name in existing_columns: + # Use existing source_info column, but convert to large_string + existing_col = table[prefixed_col_name] + + if existing_col.type == pa.string(): + # Convert to large_string + column_values = pa.compute.cast(existing_col, pa.large_string()) # type: ignore + else: + column_values = existing_col + else: + # Use null values + column_values = pa.array([None] * num_rows, type=pa.large_string()) + target_prefixed_column_names.append(prefixed_col_name) + target_prefixed_columns.append(column_values) + + # Step 3: Create the final table + data_table: pa.Table = pa.Table.from_arrays(data_columns, names=data_column_names) + result_tables = {} + for prefix in 
all_prefix_info: + result_tables[prefix] = pa.Table.from_arrays( + prefixed_columns[prefix], names=prefixed_column_names[prefix] + ) + return data_table, result_tables + + +def drop_schema_columns(schema: pa.Schema, columns: Collection[str]) -> pa.Schema: + """ + Drop specified columns from a PyArrow schema. + + Args: + schema (pa.Schema): The original schema. + columns (list[str]): List of column names to drop. + + Returns: + pa.Schema: New schema with specified columns removed. + """ + return pa.schema([field for field in schema if field.name not in columns]) diff --git a/src/orcapod/utils/lazy_module.py b/src/orcapod/utils/lazy_module.py new file mode 100644 index 0000000..75cf057 --- /dev/null +++ b/src/orcapod/utils/lazy_module.py @@ -0,0 +1,155 @@ +import importlib +from types import ModuleType +from typing import Any, Optional + + +class LazyModule: + """ + A wrapper that lazily loads a module only when its attributes are first accessed. + + Example: + # Instead of: import expensive_module + expensive_module = LazyModule('expensive_module') + + # Module is only loaded when you access something: + result = expensive_module.some_function() # Now it imports + """ + + def __init__(self, module_name: str, package: str | None = None): + """ + Initialize lazy module loader. + + Args: + module_name: Name of the module to import + package: Package for relative imports (same as importlib.import_module) + """ + self._module_name = module_name + self._package = package + self._module: ModuleType | None = None + self._loaded = False + + def _load_module(self) -> ModuleType: + """Load the module if not already loaded.""" + if not self._loaded: + self._module = importlib.import_module(self._module_name, self._package) + self._loaded = True + assert self._module is not None, ( + f"Module '{self._module_name}' could not be loaded. " + "This should not happen if the module exists." 
+            )
+        return self._module
+
+    def __getattr__(self, name: str) -> Any:
+        """Get attribute from the wrapped module, loading it if necessary."""
+        if name.startswith("_"):
+            # Avoid infinite recursion for internal attributes
+            raise AttributeError(
+                f"'{self.__class__.__name__}' object has no attribute '{name}'"
+            )
+
+        module = self._load_module()
+        return getattr(module, name)
+
+    def __setattr__(self, name: str, value: Any) -> None:
+        """Set attribute on the wrapped module or on this instance."""
+        if name.startswith("_") or not self._loaded:
+            # Set on this instance for internal attributes or before loading
+            super().__setattr__(name, value)
+        else:
+            # Set on the wrapped module
+            setattr(self._load_module(), name, value)
+
+    def __delattr__(self, name: str) -> None:
+        """Delete attribute from the wrapped module."""
+        if name.startswith("_"):
+            super().__delattr__(name)
+        else:
+            delattr(self._load_module(), name)
+
+    def __dir__(self) -> list[str]:
+        """Return directory of the wrapped module."""
+        if self._loaded:
+            return dir(self._module)
+        else:
+            # Return empty list or basic attributes before loading
+            return []
+
+    def __repr__(self) -> str:
+        """String representation."""
+        if self._loaded:
+            return f"<LazyModule '{self._module_name}' (loaded)>"
+        else:
+            return f"<LazyModule '{self._module_name}' (not loaded)>"
+
+    def __str__(self) -> str:
+        """String representation."""
+        return self.__repr__()
+
+    # Support for callable modules (modules with __call__)
+    def __call__(self, *args, **kwargs):
+        """Call the module if it's callable."""
+        module = self._load_module()
+        return module(*args, **kwargs)  # type: ignore
+
+    # Support for iteration if the module is iterable
+    def __iter__(self):
+        """Iterate over the module if it's iterable."""
+        module = self._load_module()
+        return iter(module)  # type: ignore
+
+    def __len__(self):
+        """Get length of the module if it supports len()."""
+        module = self._load_module()
+        return len(module)  # type: ignore
+
+    def __getitem__(self, key):
+        """Get item from the module if it supports indexing."""
+        module = self._load_module()
+        return module[key]  # type: ignore
+
+    def __setitem__(self, key, value):
+        """Set item on the module if it supports item assignment."""
+        module = self._load_module()
+        module[key] = value  # type: ignore
+
+    def __contains__(self, item):
+        """Check if item is in the module if it supports 'in' operator."""
+        module = self._load_module()
+        return item in module
+
+    @property
+    def is_loaded(self) -> bool:
+        """Check if the module has been loaded."""
+        return self._loaded
+
+    @property
+    def module_name(self) -> str:
+        """Get the module name."""
+        return self._module_name
+
+    def force_load(self) -> ModuleType:
+        """Force load the module and return it."""
+        return self._load_module()
+
+
+# Convenience function for creating lazy modules
+def lazy_import(module_name: str, package: Optional[str] = None) -> LazyModule:
+    """
+    Create a lazy module loader.
+ + Args: + module_name: Name of the module to import + package: Package for relative imports + + Returns: + LazyModule instance that will load the module on first access + + Example: + np = lazy_import('numpy') + pd = lazy_import('pandas') + + # Modules are only imported when you use them: + array = np.array([1, 2, 3]) # numpy imported here + df = pd.DataFrame({'a': [1, 2]}) # pandas imported here + """ + return LazyModule(module_name, package) diff --git a/src/orcapod/utils/name.py b/src/orcapod/utils/name.py index ba2c4f0..2211ef6 100644 --- a/src/orcapod/utils/name.py +++ b/src/orcapod/utils/name.py @@ -5,6 +5,70 @@ import re +# TODO: move these functions to util +def escape_with_postfix(field: str, postfix=None, separator="_") -> str: + """ + Escape the field string by doubling separators and optionally append a postfix. + This function takes a field string and escapes any occurrences of the separator + by doubling them, then optionally appends a postfix with a separator prefix. + + Args: + field (str): The input string containing to be escaped. + postfix (str, optional): An optional postfix to append to the escaped string. + If None, no postfix is added. Defaults to None. + separator (str, optional): The separator character to escape and use for + prefixing the postfix. Defaults to "_". + Returns: + str: The escaped string with optional postfix. Returns empty string if + fields is provided but postfix is None. + Examples: + >>> escape_with_postfix("field1_field2", "suffix") + 'field1__field2_suffix' + >>> escape_with_postfix("name_age_city", "backup", "_") + 'name__age__city_backup' + >>> escape_with_postfix("data-info", "temp", "-") + 'data--info-temp' + >>> escape_with_postfix("simple", None) + 'simple' + >>> escape_with_postfix("no_separators", "end") + 'no__separators_end' + """ + + return field.replace(separator, separator * 2) + (f"_{postfix}" if postfix else "") + + +def unescape_with_postfix(field: str, separator="_") -> tuple[str, str | None]: + """ + Unescape a string by converting double separators back to single separators and extract postfix metadata. + This function reverses the escaping process where single separators were doubled to avoid + conflicts with metadata delimiters. It splits the input on double separators, then extracts + any postfix metadata from the last part. + + Args: + field (str): The escaped string containing doubled separators and optional postfix metadata + separator (str, optional): The separator character used for escaping. Defaults to "_" + Returns: + tuple[str, str | None]: A tuple containing: + - The unescaped string with single separators restored + - The postfix metadata if present, None otherwise + Examples: + >>> unescape_with_postfix("field1__field2__field3") + ('field1_field2_field3', None) + >>> unescape_with_postfix("field1__field2_metadata") + ('field1_field2', 'metadata') + >>> unescape_with_postfix("simple") + ('simple', None) + >>> unescape_with_postfix("field1--field2", separator="-") + ('field1-field2', None) + >>> unescape_with_postfix("field1--field2-meta", separator="-") + ('field1-field2', 'meta') + """ + + parts = field.split(separator * 2) + parts[-1], *meta = parts[-1].split("_", 1) + return separator.join(parts), meta[0] if meta else None + + def find_noncolliding_name(name: str, lut: dict) -> str: """ Generate a unique name that does not collide with existing keys in a lookup table (lut). 
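The semantic-type machinery added above (SemanticType, SemanticTypeRegistry, SemanticSchema, and SemanticConverter) is designed to round-trip packets between Python values and Arrow tables. A minimal sketch of how the pieces compose, using the path converters defined in this changeset; the field names and packet contents are illustrative only, not part of the diff:

    from pathlib import Path

    from orcapod.types.schemas import SemanticSchema
    from orcapod.types.semantic_converter import SemanticConverter
    from orcapod.types.semantic_types import (
        ArrowStringPathConverter,
        PathlibPathConverter,
        SemanticType,
        SemanticTypeRegistry,
    )

    # A 'path' semantic type: pathlib.Path on the Python side, large_string in Arrow
    path_type = SemanticType(
        "path",
        description="File system path",
        python_converters=[PathlibPathConverter()],
        arrow_converters=[ArrowStringPathConverter()],
    )
    registry = SemanticTypeRegistry([path_type])

    # Derive a semantic schema from a plain TypeSpec, then build a converter from it
    schema = SemanticSchema.from_typespec({"image": Path, "count": int}, registry)
    converter = SemanticConverter.from_semantic_schema(schema)

    # Round-trip a packet: Python dict -> Arrow table -> list of Python dicts
    table = converter.from_python_to_arrow(
        {"image": Path("/data/img.png"), "count": 3},
        python_schema={"image": Path, "count": int},
    )
    packets = converter.from_arrow_to_python(table)
    # packets == [{"image": Path("/data/img.png"), "count": 3}]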
diff --git a/src/orcapod/utils/object_spec.py b/src/orcapod/utils/object_spec.py new file mode 100644 index 0000000..8949622 --- /dev/null +++ b/src/orcapod/utils/object_spec.py @@ -0,0 +1,29 @@ +import importlib +from typing import Any + + +def parse_objectspec(obj_spec: Any) -> Any: + if isinstance(obj_spec, dict): + if "_class" in obj_spec: + # if _class is specified, treat the dict as an object specification, looking for + # _config key to extract configuration parameters + module_name, class_name = obj_spec["_class"].rsplit(".", 1) + module = importlib.import_module(module_name) + cls = getattr(module, class_name) + configs = parse_objectspec(obj_spec.get("_config", {})) + return cls(**configs) + else: + # otherwise, parse through the dictionary recursively + parsed_object = obj_spec + for k, v in obj_spec.items(): + parsed_object[k] = parse_objectspec(v) + return parsed_object + elif isinstance(obj_spec, list): + # if it's a list, parse each item in the list + return [parse_objectspec(item) for item in obj_spec] + elif isinstance(obj_spec, tuple): + # if it's a tuple, parse each item in the tuple + return tuple(parse_objectspec(item) for item in obj_spec) + else: + # if it's not a dict, list, or tuple, return it as is + return obj_spec diff --git a/src/orcapod/utils/stream_utils.py b/src/orcapod/utils/stream_utils.py index 51d46c1..4246088 100644 --- a/src/orcapod/utils/stream_utils.py +++ b/src/orcapod/utils/stream_utils.py @@ -12,24 +12,6 @@ V = TypeVar("V") -def get_typespec(dict: Mapping) -> TypeSpec: - """ - Returns a TypeSpec for the given dictionary. - The TypeSpec is a mapping from field name to Python type. - """ - return {key: type(value) for key, value in dict.items()} - - -def get_compatible_type(type1: Any, type2: Any) -> Any: - if type1 is type2: - return type1 - if issubclass(type1, type2): - return type2 - if issubclass(type2, type1): - return type1 - raise TypeError(f"Types {type1} and {type2} are not compatible") - - def merge_dicts(left: dict[K, V], right: dict[K, V]) -> dict[K, V]: merged = left.copy() for key, right_value in right.items(): @@ -43,22 +25,6 @@ def merge_dicts(left: dict[K, V], right: dict[K, V]) -> dict[K, V]: return merged -def merge_typespecs(left: TypeSpec | None, right: TypeSpec | None) -> TypeSpec | None: - if left is None: - return right - if right is None: - return left - # Merge the two TypeSpecs but raise an error if conflicts in types are found - merged = dict(left) - for key, right_type in right.items(): - merged[key] = ( - get_compatible_type(merged[key], right_type) - if key in merged - else right_type - ) - return merged - - def common_elements(*values) -> Collection[str]: """ Returns the common keys between all lists of values. The identified common elements are @@ -89,6 +55,24 @@ def join_tags(tag1: Mapping[K, V], tag2: Mapping[K, V]) -> dict[K, V] | None: return joined_tag + +def semijoin_tags( + tag1: Mapping[K, V], tag2: Mapping[K, V], target_keys: Collection[K] | None = None +) -> dict[K, V] | None: + """ + Semijoin two tags. If the tags share a key, the values must match or None is returned. If all shared + keys' values match, a copy of tag1 is returned. + """ + if target_keys is None: + target_keys = set(tag1.keys()).intersection(set(tag2.keys())) + if not target_keys: + return dict(tag1) + + for key in target_keys: + if tag1[key] != tag2[key]: + return None + return dict(tag1) + + def check_packet_compatibility(packet1: Packet, packet2: Packet) -> bool: """ Checks if two packets are compatible.
If the packets have the same key, the value must be the same or False will be returned. diff --git a/tests/test_hashing/generate_file_hashes.py b/tests/test_hashing/generate_file_hashes.py index 1002b7f..0beb66c 100644 --- a/tests/test_hashing/generate_file_hashes.py +++ b/tests/test_hashing/generate_file_hashes.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -# filepath: /home/eywalker/workspace/orcabridge/tests/test_hashing/generate_file_hashes.py """ Generate sample files with random content and record their hashes. @@ -14,7 +13,7 @@ from datetime import datetime from pathlib import Path -# Add the parent directory to the path to import orcabridge +# Add the parent directory to the path to import orcapod sys.path.append(str(Path(__file__).parent.parent.parent)) from orcapod.hashing import hash_file diff --git a/tests/test_hashing/generate_pathset_packet_hashes.py b/tests/test_hashing/generate_pathset_packet_hashes.py index 61a36eb..edd804d 100644 --- a/tests/test_hashing/generate_pathset_packet_hashes.py +++ b/tests/test_hashing/generate_pathset_packet_hashes.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -# filepath: /home/eywalker/workspace/orcabridge/tests/test_hashing/generate_pathset_packet_hashes.py """ Generate sample pathsets and packets and record their hashes. @@ -11,7 +10,7 @@ import sys from pathlib import Path -# Add the parent directory to the path to import orcabridge +# Add the parent directory to the path to import orcapod sys.path.append(str(Path(__file__).parent.parent.parent)) from orcapod.hashing import hash_packet, hash_pathset diff --git a/tests/test_hashing/test_basic_composite_hasher.py b/tests/test_hashing/test_basic_composite_hasher.py index d2c5361..a2d35a6 100644 --- a/tests/test_hashing/test_basic_composite_hasher.py +++ b/tests/test_hashing/test_basic_composite_hasher.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -# filepath: /home/eywalker/workspace/orcabridge/tests/test_hashing/test_default_file_hasher.py """ Test DefaultFileHasher functionality. 
@@ -13,7 +12,7 @@ import pytest -from orcapod.hashing.file_hashers import PathLikeHasherFactory +from orcapod.hashing.file_hashers import LegacyPathLikeHasherFactory def load_hash_lut(): @@ -83,7 +82,7 @@ def verify_path_exists(rel_path): def test_default_file_hasher_file_hash_consistency(): """Test that DefaultFileHasher.hash_file produces consistent results for the sample files.""" hash_lut = load_hash_lut() - hasher = PathLikeHasherFactory.create_basic_composite() + hasher = LegacyPathLikeHasherFactory.create_basic_legacy_composite() for filename, info in hash_lut.items(): rel_path = info["file"] @@ -105,7 +104,7 @@ def test_default_file_hasher_file_hash_consistency(): def test_default_file_hasher_pathset_hash_consistency(): """Test that DefaultFileHasher.hash_pathset produces consistent results for the sample pathsets.""" hash_lut = load_pathset_hash_lut() - hasher = PathLikeHasherFactory.create_basic_composite() + hasher = LegacyPathLikeHasherFactory.create_basic_legacy_composite() for name, info in hash_lut.items(): paths_rel = info["paths"] @@ -138,7 +137,7 @@ def test_default_file_hasher_pathset_hash_consistency(): def test_default_file_hasher_packet_hash_consistency(): """Test that DefaultFileHasher.hash_packet produces consistent results for the sample packets.""" hash_lut = load_packet_hash_lut() - hasher = PathLikeHasherFactory.create_basic_composite() + hasher = LegacyPathLikeHasherFactory.create_basic_legacy_composite() for name, info in hash_lut.items(): structure = info["structure"] @@ -182,7 +181,9 @@ def test_default_file_hasher_file_hash_algorithm_parameters(): for algorithm in algorithms: try: - hasher = PathLikeHasherFactory.create_basic_composite(algorithm=algorithm) + hasher = LegacyPathLikeHasherFactory.create_basic_legacy_composite( + algorithm=algorithm + ) hash1 = hasher.hash_file(file_path) hash2 = hasher.hash_file(file_path) assert hash1 == hash2, f"Hash inconsistent for algorithm {algorithm}" @@ -194,7 +195,9 @@ def test_default_file_hasher_file_hash_algorithm_parameters(): buffer_sizes = [1024, 4096, 16384, 65536] for buffer_size in buffer_sizes: - hasher = PathLikeHasherFactory.create_basic_composite(buffer_size=buffer_size) + hasher = LegacyPathLikeHasherFactory.create_basic_legacy_composite( + buffer_size=buffer_size + ) hash1 = hasher.hash_file(file_path) hash2 = hasher.hash_file(file_path) assert hash1 == hash2, f"Hash inconsistent for buffer size {buffer_size}" @@ -223,7 +226,9 @@ def test_default_file_hasher_pathset_hash_algorithm_parameters(): for algorithm in algorithms: try: - hasher = PathLikeHasherFactory.create_basic_composite(algorithm=algorithm) + hasher = LegacyPathLikeHasherFactory.create_basic_legacy_composite( + algorithm=algorithm + ) hash1 = hasher.hash_pathset(pathset) hash2 = hasher.hash_pathset(pathset) assert hash1 == hash2, f"Hash inconsistent for algorithm {algorithm}" @@ -235,7 +240,9 @@ def test_default_file_hasher_pathset_hash_algorithm_parameters(): buffer_sizes = [1024, 4096, 16384, 65536] for buffer_size in buffer_sizes: - hasher = PathLikeHasherFactory.create_basic_composite(buffer_size=buffer_size) + hasher = LegacyPathLikeHasherFactory.create_basic_legacy_composite( + buffer_size=buffer_size + ) hash1 = hasher.hash_pathset(pathset) hash2 = hasher.hash_pathset(pathset) assert hash1 == hash2, f"Hash inconsistent for buffer size {buffer_size}" @@ -267,7 +274,9 @@ def test_default_file_hasher_packet_hash_algorithm_parameters(): for algorithm in algorithms: try: - hasher = 
PathLikeHasherFactory.create_basic_composite(algorithm=algorithm) + hasher = LegacyPathLikeHasherFactory.create_basic_legacy_composite( + algorithm=algorithm + ) hash1 = hasher.hash_packet(packet) hash2 = hasher.hash_packet(packet) @@ -286,7 +295,9 @@ def test_default_file_hasher_packet_hash_algorithm_parameters(): buffer_sizes = [1024, 4096, 16384, 65536] for buffer_size in buffer_sizes: - hasher = PathLikeHasherFactory.create_basic_composite(buffer_size=buffer_size) + hasher = LegacyPathLikeHasherFactory.create_basic_legacy_composite( + buffer_size=buffer_size + ) hash1 = hasher.hash_packet(packet) hash2 = hasher.hash_packet(packet) assert hash1 == hash2, f"Hash inconsistent for buffer size {buffer_size}" diff --git a/tests/test_hashing/test_basic_hashing.py b/tests/test_hashing/test_basic_hashing.py index df90a1a..c67723a 100644 --- a/tests/test_hashing/test_basic_hashing.py +++ b/tests/test_hashing/test_basic_hashing.py @@ -1,4 +1,4 @@ -from orcapod.hashing.core import ( +from orcapod.hashing.legacy_core import ( HashableMixin, hash_to_hex, hash_to_int, diff --git a/tests/test_hashing/test_cached_file_hasher.py b/tests/test_hashing/test_cached_file_hasher.py index 3307628..8b9ce30 100644 --- a/tests/test_hashing/test_cached_file_hasher.py +++ b/tests/test_hashing/test_cached_file_hasher.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -# filepath: /home/eywalker/workspace/orcabridge/tests/test_hashing/test_cached_file_hasher.py """Tests for CachedFileHasher implementation.""" import json @@ -11,11 +10,11 @@ import pytest from orcapod.hashing.file_hashers import ( - BasicFileHasher, - CachedFileHasher, + LegacyDefaultFileHasher, + LegacyCachedFileHasher, ) from orcapod.hashing.string_cachers import InMemoryCacher -from orcapod.hashing.types import FileHasher, StringCacher +from orcapod.hashing.types import LegacyFileHasher, StringCacher def verify_path_exists(rel_path): @@ -74,15 +73,15 @@ def load_packet_hash_lut(): def test_cached_file_hasher_construction(): """Test that CachedFileHasher can be constructed with various parameters.""" # Test with default parameters - file_hasher = BasicFileHasher() + file_hasher = LegacyDefaultFileHasher() string_cacher = InMemoryCacher() - cached_hasher1 = CachedFileHasher(file_hasher, string_cacher) + cached_hasher1 = LegacyCachedFileHasher(file_hasher, string_cacher) assert cached_hasher1.file_hasher == file_hasher assert cached_hasher1.string_cacher == string_cacher # Test that CachedFileHasher implements FileHasher protocol - assert isinstance(cached_hasher1, FileHasher) + assert isinstance(cached_hasher1, LegacyFileHasher) def test_cached_file_hasher_file_caching(): @@ -100,8 +99,8 @@ def test_cached_file_hasher_file_caching(): mock_string_cacher = MagicMock(spec=StringCacher) mock_string_cacher.get_cached.return_value = None # Initially no cached value - file_hasher = BasicFileHasher() - cached_hasher = CachedFileHasher(file_hasher, mock_string_cacher) + file_hasher = LegacyDefaultFileHasher() + cached_hasher = LegacyCachedFileHasher(file_hasher, mock_string_cacher) # First call should compute the hash and cache it result1 = cached_hasher.hash_file(file_path) @@ -137,14 +136,14 @@ def test_cached_file_hasher_call_counts(): try: # Mock the file_hasher to track calls - mock_file_hasher = MagicMock(spec=FileHasher) + mock_file_hasher = MagicMock(spec=LegacyFileHasher) mock_file_hasher.hash_file.return_value = "mock_file_hash" # Real cacher string_cacher = InMemoryCacher() # Create the cached file hasher with all caching enabled - cached_hasher = 
CachedFileHasher( + cached_hasher = LegacyCachedFileHasher( mock_file_hasher, string_cacher, ) @@ -182,11 +181,11 @@ def test_cached_file_hasher_performance(): file_path = verify_path_exists(info["file"]) # Setup non-cached hasher - file_hasher = BasicFileHasher() + file_hasher = LegacyDefaultFileHasher() # Setup cached hasher string_cacher = InMemoryCacher() - cached_hasher = CachedFileHasher(file_hasher, string_cacher) + cached_hasher = LegacyCachedFileHasher(file_hasher, string_cacher) # Measure time for multiple hash operations with non-cached hasher start_time = time.time() @@ -222,11 +221,11 @@ def test_cached_file_hasher_with_different_cachers(): try: file_path = temp_file.name - file_hasher = BasicFileHasher() + file_hasher = LegacyDefaultFileHasher() # Test with InMemoryCacher mem_cacher = InMemoryCacher(max_size=10) - cached_hasher1 = CachedFileHasher(file_hasher, mem_cacher) + cached_hasher1 = LegacyCachedFileHasher(file_hasher, mem_cacher) # First hash call hash1 = cached_hasher1.hash_file(file_path) @@ -250,7 +249,7 @@ def clear_cache(self) -> None: self.storage.clear() custom_cacher = CustomCacher() - cached_hasher2 = CachedFileHasher(file_hasher, custom_cacher) + cached_hasher2 = LegacyCachedFileHasher(file_hasher, custom_cacher) # Get hash with custom cacher hash2 = cached_hasher2.hash_file(file_path) diff --git a/tests/test_hashing/test_file_hashes.py b/tests/test_hashing/test_file_hashes.py index 66ed987..afcaaad 100644 --- a/tests/test_hashing/test_file_hashes.py +++ b/tests/test_hashing/test_file_hashes.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -# filepath: /home/eywalker/workspace/orcabridge/tests/test_hashing/test_file_hashes.py """ Test file hash consistency. @@ -12,8 +11,8 @@ import pytest -# Add the parent directory to the path to import orcabridge -from orcapod.hashing import hash_file +# Add the parent directory to the path to import orcapod +from orcapod.hashing.legacy_core import hash_file def load_hash_lut(): diff --git a/tests/test_hashing/test_hash_samples.py b/tests/test_hashing/test_hash_samples.py index cfb3e35..1e536cb 100644 --- a/tests/test_hashing/test_hash_samples.py +++ b/tests/test_hashing/test_hash_samples.py @@ -12,7 +12,7 @@ import pytest -from orcapod.hashing import hash_to_hex, hash_to_int, hash_to_uuid +from orcapod.hashing.legacy_core import hash_to_hex, hash_to_int, hash_to_uuid def get_latest_hash_samples(): diff --git a/tests/test_hashing/test_hasher_factory.py b/tests/test_hashing/test_hasher_factory.py index afd2392..68daa3a 100644 --- a/tests/test_hashing/test_hasher_factory.py +++ b/tests/test_hashing/test_hasher_factory.py @@ -5,9 +5,9 @@ from pathlib import Path from orcapod.hashing.file_hashers import ( - BasicFileHasher, - CachedFileHasher, - PathLikeHasherFactory, + LegacyDefaultFileHasher, + LegacyCachedFileHasher, + LegacyPathLikeHasherFactory, ) from orcapod.hashing.string_cachers import FileCacher, InMemoryCacher @@ -17,11 +17,11 @@ class TestPathLikeHasherFactoryCreateFileHasher: def test_create_file_hasher_without_cacher(self): """Test creating a file hasher without string cacher (returns BasicFileHasher).""" - hasher = PathLikeHasherFactory.create_file_hasher() + hasher = LegacyPathLikeHasherFactory.create_legacy_file_hasher() - # Should return BasicFileHasher - assert isinstance(hasher, BasicFileHasher) - assert not isinstance(hasher, CachedFileHasher) + # Should return LegacyDefaultFileHasher + assert isinstance(hasher, LegacyDefaultFileHasher) + assert not isinstance(hasher, LegacyCachedFileHasher) # Check default 
parameters assert hasher.algorithm == "sha256" @@ -30,60 +30,67 @@ def test_create_file_hasher_without_cacher(self): def test_create_file_hasher_with_cacher(self): """Test creating a file hasher with string cacher (returns CachedFileHasher).""" cacher = InMemoryCacher() - hasher = PathLikeHasherFactory.create_file_hasher(string_cacher=cacher) + hasher = LegacyPathLikeHasherFactory.create_legacy_file_hasher( + string_cacher=cacher + ) - # Should return CachedFileHasher - assert isinstance(hasher, CachedFileHasher) + # Should return LegacyCachedFileHasher + assert isinstance(hasher, LegacyCachedFileHasher) assert hasher.string_cacher is cacher - # The underlying file hasher should be BasicFileHasher with defaults - assert isinstance(hasher.file_hasher, BasicFileHasher) + # The underlying file hasher should be LegacyDefaultFileHasher with defaults + assert isinstance(hasher.file_hasher, LegacyDefaultFileHasher) assert hasher.file_hasher.algorithm == "sha256" assert hasher.file_hasher.buffer_size == 65536 def test_create_file_hasher_custom_algorithm(self): """Test creating file hasher with custom algorithm.""" # Without cacher - hasher = PathLikeHasherFactory.create_file_hasher(algorithm="md5") - assert isinstance(hasher, BasicFileHasher) + hasher = LegacyPathLikeHasherFactory.create_legacy_file_hasher(algorithm="md5") + assert isinstance(hasher, LegacyDefaultFileHasher) assert hasher.algorithm == "md5" assert hasher.buffer_size == 65536 # With cacher cacher = InMemoryCacher() - hasher = PathLikeHasherFactory.create_file_hasher( + hasher = LegacyPathLikeHasherFactory.create_legacy_file_hasher( string_cacher=cacher, algorithm="sha512" ) - assert isinstance(hasher, CachedFileHasher) + assert isinstance(hasher, LegacyCachedFileHasher) + assert isinstance(hasher.file_hasher, LegacyDefaultFileHasher) assert hasher.file_hasher.algorithm == "sha512" assert hasher.file_hasher.buffer_size == 65536 def test_create_file_hasher_custom_buffer_size(self): """Test creating file hasher with custom buffer size.""" # Without cacher - hasher = PathLikeHasherFactory.create_file_hasher(buffer_size=32768) - assert isinstance(hasher, BasicFileHasher) + hasher = LegacyPathLikeHasherFactory.create_legacy_file_hasher( + buffer_size=32768 + ) + assert isinstance(hasher, LegacyDefaultFileHasher) assert hasher.algorithm == "sha256" assert hasher.buffer_size == 32768 # With cacher cacher = InMemoryCacher() - hasher = PathLikeHasherFactory.create_file_hasher( + hasher = LegacyPathLikeHasherFactory.create_legacy_file_hasher( string_cacher=cacher, buffer_size=8192 ) - assert isinstance(hasher, CachedFileHasher) + assert isinstance(hasher, LegacyCachedFileHasher) + assert isinstance(hasher.file_hasher, LegacyDefaultFileHasher) assert hasher.file_hasher.algorithm == "sha256" assert hasher.file_hasher.buffer_size == 8192 def test_create_file_hasher_all_custom_parameters(self): """Test creating file hasher with all custom parameters.""" cacher = InMemoryCacher(max_size=500) - hasher = PathLikeHasherFactory.create_file_hasher( + hasher = LegacyPathLikeHasherFactory.create_legacy_file_hasher( string_cacher=cacher, algorithm="blake2b", buffer_size=16384 ) - assert isinstance(hasher, CachedFileHasher) + assert isinstance(hasher, LegacyCachedFileHasher) assert hasher.string_cacher is cacher + assert isinstance(hasher.file_hasher, LegacyDefaultFileHasher) assert hasher.file_hasher.algorithm == "blake2b" assert hasher.file_hasher.buffer_size == 16384 @@ -91,17 +98,19 @@ def test_create_file_hasher_different_cacher_types(self): """Test 
creating file hasher with different types of string cachers.""" # InMemoryCacher memory_cacher = InMemoryCacher() - hasher1 = PathLikeHasherFactory.create_file_hasher(string_cacher=memory_cacher) - assert isinstance(hasher1, CachedFileHasher) + hasher1 = LegacyPathLikeHasherFactory.create_legacy_file_hasher( + string_cacher=memory_cacher + ) + assert isinstance(hasher1, LegacyCachedFileHasher) assert hasher1.string_cacher is memory_cacher # FileCacher with tempfile.NamedTemporaryFile(delete=False) as tmp_file: file_cacher = FileCacher(tmp_file.name) - hasher2 = PathLikeHasherFactory.create_file_hasher( + hasher2 = LegacyPathLikeHasherFactory.create_legacy_file_hasher( string_cacher=file_cacher ) - assert isinstance(hasher2, CachedFileHasher) + assert isinstance(hasher2, LegacyCachedFileHasher) assert hasher2.string_cacher is file_cacher # Clean up @@ -109,7 +118,7 @@ def test_create_file_hasher_different_cacher_types(self): def test_create_file_hasher_functional_without_cache(self): """Test that created file hasher actually works for hashing files.""" - hasher = PathLikeHasherFactory.create_file_hasher( + hasher = LegacyPathLikeHasherFactory.create_legacy_file_hasher( algorithm="sha256", buffer_size=1024 ) @@ -136,7 +145,7 @@ def test_create_file_hasher_functional_without_cache(self): def test_create_file_hasher_functional_with_cache(self): """Test that created cached file hasher works and caches results.""" cacher = InMemoryCacher() - hasher = PathLikeHasherFactory.create_file_hasher( + hasher = LegacyPathLikeHasherFactory.create_legacy_file_hasher( string_cacher=cacher, algorithm="sha256" ) @@ -164,44 +173,55 @@ def test_create_file_hasher_functional_with_cache(self): def test_create_file_hasher_none_cacher_explicit(self): """Test explicitly passing None for string_cacher.""" - hasher = PathLikeHasherFactory.create_file_hasher( + hasher = LegacyPathLikeHasherFactory.create_legacy_file_hasher( string_cacher=None, algorithm="sha1", buffer_size=4096 ) - assert isinstance(hasher, BasicFileHasher) - assert not isinstance(hasher, CachedFileHasher) + assert isinstance(hasher, LegacyDefaultFileHasher) + assert not isinstance(hasher, LegacyCachedFileHasher) assert hasher.algorithm == "sha1" assert hasher.buffer_size == 4096 def test_create_file_hasher_parameter_edge_cases(self): """Test edge cases for parameters.""" # Very small buffer size - hasher1 = PathLikeHasherFactory.create_file_hasher(buffer_size=1) + hasher1 = LegacyPathLikeHasherFactory.create_legacy_file_hasher(buffer_size=1) + assert isinstance(hasher1, LegacyDefaultFileHasher) assert hasher1.buffer_size == 1 # Large buffer size - hasher2 = PathLikeHasherFactory.create_file_hasher(buffer_size=1024 * 1024) + hasher2 = LegacyPathLikeHasherFactory.create_legacy_file_hasher( + buffer_size=1024 * 1024 + ) + assert isinstance(hasher2, LegacyDefaultFileHasher) assert hasher2.buffer_size == 1024 * 1024 # Different algorithms for algorithm in ["md5", "sha1", "sha224", "sha256", "sha384", "sha512"]: - hasher = PathLikeHasherFactory.create_file_hasher(algorithm=algorithm) + hasher = LegacyPathLikeHasherFactory.create_legacy_file_hasher( + algorithm=algorithm + ) + assert isinstance(hasher, LegacyDefaultFileHasher) assert hasher.algorithm == algorithm def test_create_file_hasher_cache_independence(self): """Test that different cached hashers with same cacher are independent.""" cacher = InMemoryCacher() - hasher1 = PathLikeHasherFactory.create_file_hasher( + hasher1 = LegacyPathLikeHasherFactory.create_legacy_file_hasher( string_cacher=cacher, 
algorithm="sha256" ) - hasher2 = PathLikeHasherFactory.create_file_hasher( + hasher2 = LegacyPathLikeHasherFactory.create_legacy_file_hasher( string_cacher=cacher, algorithm="md5" ) # Both should use the same cacher but be different instances + assert isinstance(hasher1, LegacyCachedFileHasher) + assert isinstance(hasher2, LegacyCachedFileHasher) assert hasher1.string_cacher is cacher assert hasher2.string_cacher is cacher assert hasher1 is not hasher2 assert hasher1.file_hasher is not hasher2.file_hasher + assert isinstance(hasher1.file_hasher, LegacyDefaultFileHasher) + assert isinstance(hasher2.file_hasher, LegacyDefaultFileHasher) assert hasher1.file_hasher.algorithm != hasher2.file_hasher.algorithm diff --git a/tests/test_hashing/test_hasher_parity.py b/tests/test_hashing/test_hasher_parity.py index fb83afb..a278a92 100644 --- a/tests/test_hashing/test_hasher_parity.py +++ b/tests/test_hashing/test_hasher_parity.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -# filepath: /home/eywalker/workspace/orcabridge/tests/test_hashing/test_hasher_parity.py """ Test parity between DefaultFileHasher and core hashing functions. @@ -14,8 +13,8 @@ import pytest -from orcapod.hashing.core import hash_file, hash_packet, hash_pathset -from orcapod.hashing.file_hashers import PathLikeHasherFactory +from orcapod.hashing.legacy_core import hash_file, hash_packet, hash_pathset +from orcapod.hashing.file_hashers import LegacyPathLikeHasherFactory def load_hash_lut(): @@ -74,7 +73,7 @@ def verify_path_exists(rel_path): def test_hasher_core_parity_file_hash(): """Test that BasicFileHasher.hash_file produces the same results as hash_file.""" hash_lut = load_hash_lut() - hasher = PathLikeHasherFactory.create_basic_composite() + hasher = LegacyPathLikeHasherFactory.create_basic_legacy_composite() # Test all sample files for filename, info in hash_lut.items(): @@ -103,7 +102,7 @@ def test_hasher_core_parity_file_hash(): for buffer_size in buffer_sizes: try: # Create a hasher with specific parameters - hasher = PathLikeHasherFactory.create_basic_composite( + hasher = LegacyPathLikeHasherFactory.create_basic_legacy_composite( algorithm=algorithm, buffer_size=buffer_size ) @@ -148,7 +147,7 @@ def test_hasher_core_parity_pathset_hash(): for buffer_size in buffer_sizes: for char_count in char_counts: # Create a hasher with specific parameters - hasher = PathLikeHasherFactory.create_basic_composite( + hasher = LegacyPathLikeHasherFactory.create_basic_legacy_composite( algorithm=algorithm, buffer_size=buffer_size, char_count=char_count, @@ -202,7 +201,7 @@ def test_hasher_core_parity_packet_hash(): for buffer_size in buffer_sizes: for char_count in char_counts: # Create a hasher with specific parameters - hasher = PathLikeHasherFactory.create_basic_composite( + hasher = LegacyPathLikeHasherFactory.create_basic_legacy_composite( algorithm=algorithm, buffer_size=buffer_size, char_count=char_count, diff --git a/tests/test_hashing/test_composite_hasher.py b/tests/test_hashing/test_legacy_composite_hasher.py similarity index 81% rename from tests/test_hashing/test_composite_hasher.py rename to tests/test_hashing/test_legacy_composite_hasher.py index 1cbe386..f234bb7 100644 --- a/tests/test_hashing/test_composite_hasher.py +++ b/tests/test_hashing/test_legacy_composite_hasher.py @@ -5,9 +5,16 @@ import pytest -from orcapod.hashing.core import hash_to_hex -from orcapod.hashing.file_hashers import BasicFileHasher, DefaultCompositeFileHasher -from orcapod.hashing.types import FileHasher, PacketHasher, PathSetHasher +from 
orcapod.hashing.legacy_core import hash_to_hex +from orcapod.hashing.file_hashers import ( + LegacyDefaultFileHasher, + LegacyDefaultCompositeFileHasher, +) +from orcapod.hashing.types import ( + LegacyFileHasher, + LegacyPacketHasher, + LegacyPathSetHasher, +) # Custom implementation of hash_file for tests that doesn't check for file existence @@ -89,9 +96,11 @@ def mock_hash_packet( def patch_hash_functions(): """Patch the hash functions in the core module for all tests.""" with ( - patch("orcapod.hashing.core.hash_file", side_effect=mock_hash_file), - patch("orcapod.hashing.core.hash_pathset", side_effect=mock_hash_pathset), - patch("orcapod.hashing.core.hash_packet", side_effect=mock_hash_packet), + patch("orcapod.hashing.legacy_core.hash_file", side_effect=mock_hash_file), + patch( + "orcapod.hashing.legacy_core.hash_pathset", side_effect=mock_hash_pathset + ), + patch("orcapod.hashing.legacy_core.hash_packet", side_effect=mock_hash_packet), ): yield @@ -99,15 +108,15 @@ def patch_hash_functions(): def test_default_composite_hasher_implements_all_protocols(): """Test that CompositeFileHasher implements all three protocols.""" # Create a basic file hasher to be used within the composite hasher - file_hasher = BasicFileHasher() + file_hasher = LegacyDefaultFileHasher() # Create the composite hasher - composite_hasher = DefaultCompositeFileHasher(file_hasher) + composite_hasher = LegacyDefaultCompositeFileHasher(file_hasher) # Verify it implements all three protocols - assert isinstance(composite_hasher, FileHasher) - assert isinstance(composite_hasher, PathSetHasher) - assert isinstance(composite_hasher, PacketHasher) + assert isinstance(composite_hasher, LegacyFileHasher) + assert isinstance(composite_hasher, LegacyPathSetHasher) + assert isinstance(composite_hasher, LegacyPacketHasher) def test_default_composite_hasher_file_hashing(): @@ -121,7 +130,7 @@ def hash_file(self, file_path): return mock_hash_file(file_path) file_hasher = MockFileHasher() - composite_hasher = DefaultCompositeFileHasher(file_hasher) + composite_hasher = LegacyDefaultCompositeFileHasher(file_hasher) # Get hash from the composite hasher and directly from the file hasher direct_hash = file_hasher.hash_file(file_path) @@ -136,11 +145,11 @@ def test_default_composite_hasher_pathset_hashing(): # Create a custom mock file hasher that doesn't check for file existence class MockFileHasher: - def hash_file(self, file_path): + def hash_file(self, file_path) -> str: return mock_hash_file(file_path) file_hasher = MockFileHasher() - composite_hasher = DefaultCompositeFileHasher(file_hasher) + composite_hasher = LegacyDefaultCompositeFileHasher(file_hasher) # Simple path set with non-existent paths pathset = ["/path/to/file1.txt", "/path/to/file2.txt"] diff --git a/tests/test_hashing/test_packet_hasher.py b/tests/test_hashing/test_packet_hasher.py index f9d519d..80a16ed 100644 --- a/tests/test_hashing/test_packet_hasher.py +++ b/tests/test_hashing/test_packet_hasher.py @@ -1,14 +1,13 @@ #!/usr/bin/env python -# filepath: /home/eywalker/workspace/orcabridge/tests/test_hashing/test_packet_hasher.py """Tests for the PacketHasher protocol implementation.""" import pytest -from orcapod.hashing.file_hashers import DefaultPacketHasher -from orcapod.hashing.types import PathSetHasher +from orcapod.hashing.file_hashers import LegacyDefaultPacketHasher +from orcapod.hashing.types import LegacyPathSetHasher -class MockPathSetHasher(PathSetHasher): +class MockPathSetHasher(LegacyPathSetHasher): """Simple mock PathSetHasher for 
testing.""" def __init__(self, hash_value="mock_hash"): @@ -20,10 +19,10 @@ def hash_pathset(self, pathset): return f"{self.hash_value}_{pathset}" -def test_default_packet_hasher_empty_packet(): - """Test DefaultPacketHasher with an empty packet.""" +def test_legacy_packet_hasher_empty_packet(): + """Test LegacyPacketHasher with an empty packet.""" pathset_hasher = MockPathSetHasher() - packet_hasher = DefaultPacketHasher(pathset_hasher) + packet_hasher = LegacyDefaultPacketHasher(pathset_hasher) # Test with empty packet packet = {} @@ -37,10 +36,10 @@ def test_default_packet_hasher_empty_packet(): assert isinstance(result, str) -def test_default_packet_hasher_single_entry(): - """Test DefaultPacketHasher with a packet containing a single entry.""" +def test_legacy_packet_hasher_single_entry(): + """Test LegacyPacketHasher with a packet containing a single entry.""" pathset_hasher = MockPathSetHasher() - packet_hasher = DefaultPacketHasher(pathset_hasher) + packet_hasher = LegacyDefaultPacketHasher(pathset_hasher) # Test with a single entry packet = {"input": "/path/to/file.txt"} @@ -55,10 +54,10 @@ def test_default_packet_hasher_single_entry(): assert isinstance(result, str) -def test_default_packet_hasher_multiple_entries(): - """Test DefaultPacketHasher with a packet containing multiple entries.""" +def test_legacy_packet_hasher_multiple_entries(): + """Test LegacyPacketHasher with a packet containing multiple entries.""" pathset_hasher = MockPathSetHasher() - packet_hasher = DefaultPacketHasher(pathset_hasher) + packet_hasher = LegacyDefaultPacketHasher(pathset_hasher) # Test with multiple entries packet = { @@ -79,10 +78,10 @@ def test_default_packet_hasher_multiple_entries(): assert isinstance(result, str) -def test_default_packet_hasher_nested_structure(): - """Test DefaultPacketHasher with a deeply nested packet structure.""" +def test_legacy_packet_hasher_nested_structure(): + """Test LegacyPacketHasher with a deeply nested packet structure.""" pathset_hasher = MockPathSetHasher() - packet_hasher = DefaultPacketHasher(pathset_hasher) + packet_hasher = LegacyDefaultPacketHasher(pathset_hasher) # Test with nested packet structure packet = { @@ -104,16 +103,16 @@ def test_default_packet_hasher_nested_structure(): assert isinstance(result, str) -def test_default_packet_hasher_with_char_count(): - """Test DefaultPacketHasher with different char_count values.""" +def test_legacy_packet_hasher_with_char_count(): + """Test LegacyPacketHasher with different char_count values.""" pathset_hasher = MockPathSetHasher() # Test with default char_count (32) - default_hasher = DefaultPacketHasher(pathset_hasher) + default_hasher = LegacyDefaultPacketHasher(pathset_hasher) default_result = default_hasher.hash_packet({"input": "/path/to/file.txt"}) # Test with custom char_count - custom_hasher = DefaultPacketHasher(pathset_hasher, char_count=16) + custom_hasher = LegacyDefaultPacketHasher(pathset_hasher, char_count=16) custom_result = custom_hasher.hash_packet({"input": "/path/to/file.txt"}) # Results should be different based on char_count diff --git a/tests/test_hashing/test_path_set_hasher.py b/tests/test_hashing/test_path_set_hasher.py index 999cc2a..c235eb0 100644 --- a/tests/test_hashing/test_path_set_hasher.py +++ b/tests/test_hashing/test_path_set_hasher.py @@ -8,12 +8,12 @@ import pytest -import orcapod.hashing.core -from orcapod.hashing.file_hashers import DefaultPathsetHasher -from orcapod.hashing.types import FileHasher +import orcapod.hashing.legacy_core +from 
orcapod.hashing.file_hashers import LegacyDefaultPathsetHasher +from orcapod.hashing.types import LegacyFileHasher -class MockFileHasher(FileHasher): +class MockFileHasher(LegacyFileHasher): """Simple mock FileHasher for testing.""" def __init__(self, hash_value="mock_hash"): @@ -35,7 +35,7 @@ def create_temp_file(content="test content"): # Store original function for restoration -original_hash_pathset = orcapod.hashing.core.hash_pathset +original_hash_pathset = orcapod.hashing.legacy_core.hash_pathset # Custom implementation of hash_pathset for tests that doesn't check for file existence @@ -46,7 +46,7 @@ def mock_hash_pathset( from collections.abc import Collection from os import PathLike - from orcapod.hashing.core import hash_to_hex + from orcapod.hashing.legacy_core import hash_to_hex from orcapod.utils.name import find_noncolliding_name # If file_hasher is None, we'll need to handle it differently @@ -86,14 +86,16 @@ def mock_hash_pathset( @pytest.fixture(autouse=True) def patch_hash_pathset(): """Patch the hash_pathset function in the hashing module for all tests.""" - with patch("orcapod.hashing.core.hash_pathset", side_effect=mock_hash_pathset): + with patch( + "orcapod.hashing.legacy_core.hash_pathset", side_effect=mock_hash_pathset + ): yield -def test_default_pathset_hasher_single_file(): - """Test DefaultPathsetHasher with a single file path.""" +def test_legacy_pathset_hasher_single_file(): + """Test LegacyPathsetHasher with a single file path.""" file_hasher = MockFileHasher() - pathset_hasher = DefaultPathsetHasher(file_hasher) + pathset_hasher = LegacyDefaultPathsetHasher(file_hasher) # Create a real file for testing file_path = create_temp_file() @@ -116,7 +118,7 @@ def test_default_pathset_hasher_single_file(): def test_default_pathset_hasher_multiple_files(): """Test DefaultPathsetHasher with multiple files in a list.""" file_hasher = MockFileHasher() - pathset_hasher = DefaultPathsetHasher(file_hasher) + pathset_hasher = LegacyDefaultPathsetHasher(file_hasher) # Create real files for testing file_paths = [create_temp_file(f"content {i}") for i in range(3)] @@ -195,7 +197,7 @@ def test_default_pathset_hasher_nested_paths(): def test_default_pathset_hasher_with_nonexistent_files(): """Test DefaultPathsetHasher with both existent and non-existent files.""" file_hasher = MockFileHasher() - pathset_hasher = DefaultPathsetHasher(file_hasher) + pathset_hasher = LegacyDefaultPathsetHasher(file_hasher) # Reset the file_hasher's call list file_hasher.file_hash_calls = [] @@ -225,7 +227,8 @@ def custom_hash_nonexistent(pathset, **kwargs): # Patch hash_pathset just for this test with patch( - "orcapod.hashing.core.hash_pathset", side_effect=custom_hash_nonexistent + "orcapod.hashing.legacy_core.hash_pathset", + side_effect=custom_hash_nonexistent, ): result = pathset_hasher.hash_pathset(pathset) @@ -249,14 +252,14 @@ def test_default_pathset_hasher_with_char_count(): try: # Test with default char_count (32) - default_hasher = DefaultPathsetHasher(file_hasher) + default_hasher = LegacyDefaultPathsetHasher(file_hasher) default_result = default_hasher.hash_pathset(file_path) # Reset call list file_hasher.file_hash_calls = [] # Test with custom char_count - custom_hasher = DefaultPathsetHasher(file_hasher, char_count=16) + custom_hasher = LegacyDefaultPathsetHasher(file_hasher, char_count=16) custom_result = custom_hasher.hash_pathset(file_path) # Both should have called the file_hasher once diff --git a/tests/test_hashing/test_pathset_and_packet.py 
b/tests/test_hashing/test_pathset_and_packet.py index 6b7eb6f..cde79da 100644 --- a/tests/test_hashing/test_pathset_and_packet.py +++ b/tests/test_hashing/test_pathset_and_packet.py @@ -1,7 +1,6 @@ #!/usr/bin/env python -# filepath: /home/eywalker/workspace/orcabridge/tests/test_hashing/test_pathset_and_packet.py """ -Test the hash_pathset and hash_packet functions from orcabridge.hashing. +Test the hash_pathset and hash_packet functions from orcapod.hashing. This module contains tests to verify the correct behavior of hash_pathset and hash_packet functions with various input types and configurations. @@ -14,7 +13,7 @@ import pytest -from orcapod.hashing import hash_file, hash_packet, hash_pathset +from orcapod.hashing.legacy_core import hash_file, hash_packet, hash_pathset logger = logging.getLogger(__name__) diff --git a/tests/test_hashing/test_pathset_packet_hashes.py b/tests/test_hashing/test_pathset_packet_hashes.py index 49e2d0c..7df740d 100644 --- a/tests/test_hashing/test_pathset_packet_hashes.py +++ b/tests/test_hashing/test_pathset_packet_hashes.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -# filepath: /home/eywalker/workspace/orcabridge/tests/test_hashing/test_pathset_packet_hashes.py """ Test pathset and packet hash consistency. @@ -12,8 +11,8 @@ import pytest -# Add the parent directory to the path to import orcabridge -from orcapod.hashing import hash_packet, hash_pathset +# Add the parent directory to the path to import orcapod +from orcapod.hashing.legacy_core import hash_packet, hash_pathset def load_pathset_hash_lut(): diff --git a/tests/test_hashing/test_process_structure.py b/tests/test_hashing/test_process_structure.py index 933e2dc..2967ed4 100644 --- a/tests/test_hashing/test_process_structure.py +++ b/tests/test_hashing/test_process_structure.py @@ -3,7 +3,7 @@ from pathlib import Path from typing import Any -from orcapod.hashing.core import HashableMixin, hash_to_hex, process_structure +from orcapod.hashing.legacy_core import HashableMixin, hash_to_hex, process_structure # Define a simple HashableMixin class for testing diff --git a/tests/test_hashing/test_string_cacher/test_redis_cacher.py b/tests/test_hashing/test_string_cacher/test_redis_cacher.py index 3ef49e1..eef7c43 100644 --- a/tests/test_hashing/test_string_cacher/test_redis_cacher.py +++ b/tests/test_hashing/test_string_cacher/test_redis_cacher.py @@ -68,21 +68,21 @@ def keys(self, pattern): return [key for key in self.data.keys() if key.startswith(prefix)] return [key for key in self.data.keys() if key == pattern] + class MockRedisModule: ConnectionError = MockConnectionError RedisError = MockRedisError Redis = MagicMock(return_value=MockRedis()) # Simple one-liner! 
- def mock_get_redis(): return MockRedisModule + def mock_no_redis(): return None - class TestRedisCacher: """Test cases for RedisCacher with mocked Redis.""" diff --git a/tests/test_store/conftest.py b/tests/test_store/conftest.py index 77ca9f9..6b8aa6f 100644 --- a/tests/test_store/conftest.py +++ b/tests/test_store/conftest.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -# filepath: /home/eywalker/workspace/orcabridge/tests/test_store/conftest.py """Common test fixtures for store tests.""" import shutil diff --git a/tests/test_store/test_dir_data_store.py b/tests/test_store/test_dir_data_store.py index c07f141..d7f6a3c 100644 --- a/tests/test_store/test_dir_data_store.py +++ b/tests/test_store/test_dir_data_store.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -# filepath: /home/eywalker/workspace/orcabridge/tests/test_store/test_dir_data_store.py """Tests for DirDataStore.""" import json @@ -9,15 +8,15 @@ import pytest from orcapod.hashing.types import ( - CompositeFileHasher, - FileHasher, - PacketHasher, - PathSetHasher, + LegacyCompositeFileHasher, + LegacyFileHasher, + LegacyPacketHasher, + LegacyPathSetHasher, ) -from orcapod.store.core import DirDataStore +from orcapod.stores.legacy.dict_data_stores import DirDataStore -class MockFileHasher(FileHasher): +class MockFileHasher(LegacyFileHasher): """Mock FileHasher for testing.""" def __init__(self, hash_value="mock_hash"): @@ -29,19 +28,19 @@ def hash_file(self, file_path): return f"{self.hash_value}_file" -class MockPathSetHasher(PathSetHasher): +class MockPathSetHasher(LegacyPathSetHasher): """Mock PathSetHasher for testing.""" def __init__(self, hash_value="mock_hash"): self.hash_value = hash_value self.pathset_hash_calls = [] - def hash_pathset(self, pathset): + def hash_pathset(self, pathset) -> str: self.pathset_hash_calls.append(pathset) return f"{self.hash_value}_pathset" -class MockPacketHasher(PacketHasher): +class MockPacketHasher(LegacyPacketHasher): """Mock PacketHasher for testing.""" def __init__(self, hash_value="mock_hash"): @@ -53,7 +52,7 @@ def hash_packet(self, packet): return f"{self.hash_value}_packet" -class MockCompositeHasher(CompositeFileHasher): +class MockCompositeHasher(LegacyCompositeFileHasher): """Mock CompositeHasher that implements all three hash protocols.""" def __init__(self, hash_value="mock_hash"): @@ -62,15 +61,15 @@ def __init__(self, hash_value="mock_hash"): self.pathset_hash_calls = [] self.packet_hash_calls = [] - def hash_file(self, file_path): + def hash_file_content(self, file_path): self.file_hash_calls.append(file_path) return f"{self.hash_value}_file" - def hash_pathset(self, pathset): + def hash_pathset(self, pathset) -> str: self.pathset_hash_calls.append(pathset) return f"{self.hash_value}_pathset" - def hash_packet(self, packet): + def hash_packet(self, packet) -> str: self.packet_hash_calls.append(packet) return f"{self.hash_value}_packet" @@ -87,7 +86,7 @@ def test_dir_data_store_init_default_hasher(temp_dir): assert store_dir.is_dir() # Verify the default PacketHasher is used - assert isinstance(store.packet_hasher, PacketHasher) + assert isinstance(store.packet_hasher, LegacyPacketHasher) # Check default parameters assert store.copy_files is True @@ -462,7 +461,7 @@ def test_dir_data_store_with_default_packet_hasher(temp_dir, sample_files): store = DirDataStore(store_dir=store_dir) # Verify that default PacketHasher was created - assert isinstance(store.packet_hasher, PacketHasher) + assert isinstance(store.packet_hasher, LegacyPacketHasher) # Test memoization and retrieval packet = 
{"input_file": sample_files["input"]["file1"]} @@ -500,7 +499,7 @@ def test_dir_data_store_legacy_mode_compatibility(temp_dir, sample_files): output_packet = {"output_file": sample_files["output"]["output1"]} # Get the hash values directly for comparison - from orcapod.hashing import hash_packet + from orcapod.hashing.legacy_core import hash_packet legacy_hash = hash_packet(packet, algorithm="sha256") assert store_default.packet_hasher is not None, ( @@ -611,7 +610,7 @@ def test_dir_data_store_hash_equivalence(temp_dir, sample_files): output_packet = {"output_file": sample_files["output"]["output1"]} # First compute hashes directly - from orcapod.hashing import hash_packet + from orcapod.hashing.legacy_core import hash_packet from orcapod.hashing.defaults import get_default_composite_file_hasher legacy_hash = hash_packet(packet, algorithm="sha256") diff --git a/tests/test_store/test_integration.py b/tests/test_store/test_integration.py index 023e6e6..0c50292 100644 --- a/tests/test_store/test_integration.py +++ b/tests/test_store/test_integration.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -# filepath: /home/eywalker/workspace/orcabridge/tests/test_store/test_integration.py """Integration tests for the store module.""" import os @@ -10,10 +9,10 @@ from orcapod.hashing.file_hashers import ( BasicFileHasher, CachedFileHasher, - DefaultCompositeFileHasher, + LegacyDefaultCompositeFileHasher, ) from orcapod.hashing.string_cachers import InMemoryCacher -from orcapod.store.core import DirDataStore, NoOpDataStore +from orcapod.stores.legacy.dict_data_stores import DirDataStore, NoOpDataStore def test_integration_with_cached_file_hasher(temp_dir, sample_files): @@ -29,7 +28,7 @@ def test_integration_with_cached_file_hasher(temp_dir, sample_files): ) # Create a CompositeFileHasher that will use the CachedFileHasher - composite_hasher = DefaultCompositeFileHasher(file_hasher) + composite_hasher = LegacyDefaultCompositeFileHasher(file_hasher) # Create the store with CompositeFileHasher store = DirDataStore(store_dir=store_dir, packet_hasher=composite_hasher) diff --git a/tests/test_store/test_noop_data_store.py b/tests/test_store/test_noop_data_store.py index 0da82c7..564b449 100644 --- a/tests/test_store/test_noop_data_store.py +++ b/tests/test_store/test_noop_data_store.py @@ -1,10 +1,9 @@ #!/usr/bin/env python -# filepath: /home/eywalker/workspace/orcabridge/tests/test_store/test_noop_data_store.py """Tests for NoOpDataStore.""" import pytest -from orcapod.store.core import NoOpDataStore +from orcapod.stores.legacy.dict_data_stores import NoOpDataStore def test_noop_data_store_memoize(): @@ -44,7 +43,7 @@ def test_noop_data_store_retrieve_memoized(): def test_noop_data_store_is_data_store_subclass(): """Test that NoOpDataStore is a subclass of DataStore.""" - from orcapod.store.core import DataStore + from orcapod.stores import DataStore store = NoOpDataStore() assert isinstance(store, DataStore) diff --git a/tests/test_store/test_transfer_data_store.py b/tests/test_store/test_transfer_data_store.py index 85d0a87..f4076d6 100644 --- a/tests/test_store/test_transfer_data_store.py +++ b/tests/test_store/test_transfer_data_store.py @@ -1,18 +1,16 @@ #!/usr/bin/env python -# filepath: /home/eywalker/workspace/orcabridge/tests/test_store/test_transfer_data_store.py """Tests for TransferDataStore.""" -import json from pathlib import Path import pytest -from orcapod.hashing.types import PacketHasher -from orcapod.store.core import DirDataStore, NoOpDataStore -from orcapod.store.transfer import 
TransferDataStore +from orcapod.hashing.types import LegacyPacketHasher +from orcapod.stores.legacy.dict_data_stores import DirDataStore, NoOpDataStore +from orcapod.stores.legacy.dict_transfer_data_store import TransferDataStore -class MockPacketHasher(PacketHasher): +class MockPacketHasher(LegacyPacketHasher): """Mock PacketHasher for testing.""" def __init__(self, hash_value="mock_hash"): diff --git a/tests/test_types/__init__.py b/tests/test_types/__init__.py index aa691b1..2be2a50 100644 --- a/tests/test_types/__init__.py +++ b/tests/test_types/__init__.py @@ -1 +1 @@ -# Test package for orcabridge types module +# Test package for orcapod types module diff --git a/tests/test_types/test_inference/__init__.py b/tests/test_types/test_inference/__init__.py index 45e6baf..ae4cff0 100644 --- a/tests/test_types/test_inference/__init__.py +++ b/tests/test_types/test_inference/__init__.py @@ -1 +1 @@ -# Test package for orcabridge types inference module +# Test package for orcapod types inference module diff --git a/tests/test_types/test_inference/test_extract_function_data_types.py b/tests/test_types/test_inference/test_extract_function_data_types.py index a357bb0..8ae1ea5 100644 --- a/tests/test_types/test_inference/test_extract_function_data_types.py +++ b/tests/test_types/test_inference/test_extract_function_data_types.py @@ -1,5 +1,5 @@ """ -Unit tests for the extract_function_data_types function. +Unit tests for the extract_function_typespecs function. This module tests the function type extraction functionality, covering: - Type inference from function annotations @@ -11,11 +11,11 @@ import pytest from collections.abc import Collection -from orcapod.types.inference import extract_function_data_types +from orcapod.types.typespec_utils import extract_function_typespecs class TestExtractFunctionDataTypes: - """Test cases for extract_function_data_types function.""" + """Test cases for extract_function_typespecs function.""" def test_simple_annotated_function(self): """Test function with simple type annotations.""" @@ -23,7 +23,7 @@ def test_simple_annotated_function(self): def add(x: int, y: int) -> int: return x + y - input_types, output_types = extract_function_data_types(add, ["result"]) + input_types, output_types = extract_function_typespecs(add, ["result"]) assert input_types == {"x": int, "y": int} assert output_types == {"result": int} @@ -34,7 +34,7 @@ def test_multiple_return_values_tuple(self): def process(data: str) -> tuple[int, str]: return len(data), data.upper() - input_types, output_types = extract_function_data_types( + input_types, output_types = extract_function_typespecs( process, ["length", "upper_data"] ) @@ -54,7 +54,7 @@ def split_data(data: str) -> tuple[str, str]: # Note: This tests the case where we have multiple output keys # but the return type is list[str] (homogeneous) - input_types, output_types = extract_function_data_types( + input_types, output_types = extract_function_typespecs( split_data, ["first_word", "second_word"] ) @@ -71,7 +71,7 @@ def mystery_func(x: int): ValueError, match="Type for return item 'number' is not specified in output_types", ): - input_types, output_types = extract_function_data_types( + input_types, output_types = extract_function_typespecs( mystery_func, ["number", "text"], ) @@ -82,7 +82,7 @@ def test_input_types_override(self): def legacy_func(x, y) -> int: # No annotations return x + y - input_types, output_types = extract_function_data_types( + input_types, output_types = extract_function_typespecs( legacy_func, ["sum"], 
input_types={"x": int, "y": int} ) @@ -95,7 +95,7 @@ def test_partial_input_types_override(self): def mixed_func(x: int, y) -> int: # One annotated, one not return x + y - input_types, output_types = extract_function_data_types( + input_types, output_types = extract_function_typespecs( mixed_func, ["sum"], input_types={"y": float} ) @@ -108,7 +108,7 @@ def test_output_types_dict_override(self): def mystery_func(x: int) -> str: return str(x) - input_types, output_types = extract_function_data_types( + input_types, output_types = extract_function_typespecs( mystery_func, ["result"], output_types={"result": float} ) @@ -121,7 +121,7 @@ def test_output_types_sequence_override(self): def multi_return(data: list) -> tuple[int, float, str]: return len(data), sum(data), str(data) - input_types, output_types = extract_function_data_types( + input_types, output_types = extract_function_typespecs( multi_return, ["count", "total", "repr"], output_types=[int, float, str] ) @@ -134,7 +134,7 @@ def test_complex_types(self): def complex_func(x: str | None, y: int | float) -> tuple[bool, list[str]]: return bool(x), [x] if x else [] - input_types, output_types = extract_function_data_types( + input_types, output_types = extract_function_typespecs( complex_func, ["is_valid", "items"] ) @@ -147,7 +147,7 @@ def test_none_return_annotation(self): def side_effect_func(x: int) -> None: print(x) - input_types, output_types = extract_function_data_types(side_effect_func, []) + input_types, output_types = extract_function_typespecs(side_effect_func, []) assert input_types == {"x": int} assert output_types == {} @@ -158,7 +158,7 @@ def test_empty_parameters(self): def get_constant() -> int: return 42 - input_types, output_types = extract_function_data_types(get_constant, ["value"]) + input_types, output_types = extract_function_typespecs(get_constant, ["value"]) assert input_types == {} assert output_types == {"value": int} @@ -172,7 +172,7 @@ def bad_func(x, y: int): return x + y with pytest.raises(ValueError, match="Parameter 'x' has no type annotation"): - extract_function_data_types(bad_func, ["result"]) + extract_function_typespecs(bad_func, ["result"]) def test_return_annotation_but_no_output_keys_error(self): """Test error when function has return annotation but no output keys.""" @@ -184,7 +184,7 @@ def func_with_return(x: int) -> str: ValueError, match="Function has a return type annotation, but no return keys were specified", ): - extract_function_data_types(func_with_return, []) + extract_function_typespecs(func_with_return, []) def test_none_return_with_output_keys_error(self): """Test error when function returns None but output keys provided.""" @@ -196,7 +196,7 @@ def side_effect_func(x: int) -> None: ValueError, match="Function provides explicit return type annotation as None", ): - extract_function_data_types(side_effect_func, ["result"]) + extract_function_typespecs(side_effect_func, ["result"]) def test_single_return_multiple_keys_error(self): """Test error when single return type but multiple output keys.""" @@ -208,7 +208,7 @@ def single_return(x: int) -> str: ValueError, match="Multiple return keys were specified but return type annotation .* is not a sequence type", ): - extract_function_data_types(single_return, ["first", "second"]) + extract_function_typespecs(single_return, ["first", "second"]) def test_unparameterized_sequence_type_error(self): """Test error when return type is sequence but not parameterized.""" @@ -219,7 +219,7 @@ def bad_return(x: int) -> tuple: # tuple without types with 
pytest.raises( ValueError, match="is a Sequence type but does not specify item types" ): - extract_function_data_types(bad_return, ["number", "text"]) + extract_function_typespecs(bad_return, ["number", "text"]) def test_mismatched_return_types_count_error(self): """Test error when return type count doesn't match output keys count.""" @@ -230,7 +230,7 @@ def three_returns(x: int) -> tuple[int, str, float]: with pytest.raises( ValueError, match="has 3 items, but output_keys has 2 items" ): - extract_function_data_types(three_returns, ["first", "second"]) + extract_function_typespecs(three_returns, ["first", "second"]) def test_mismatched_output_types_sequence_length_error(self): """Test error when output_types sequence length doesn't match output_keys.""" @@ -242,7 +242,7 @@ def func(x: int) -> tuple[int, str]: ValueError, match="Output types collection length .* does not match return keys length", ): - extract_function_data_types( + extract_function_typespecs( func, ["first", "second"], output_types=[int, str, float], # Wrong length @@ -258,7 +258,7 @@ def no_return_annotation(x: int): ValueError, match="Type for return item 'first' is not specified in output_types", ): - extract_function_data_types(no_return_annotation, ["first", "second"]) + extract_function_typespecs(no_return_annotation, ["first", "second"]) # Edge cases @@ -268,7 +268,7 @@ def test_callable_with_args_kwargs(self): def flexible_func(x: int, *args: str, **kwargs: float) -> bool: return True - input_types, output_types = extract_function_data_types( + input_types, output_types = extract_function_typespecs( flexible_func, ["success"] ) @@ -284,7 +284,7 @@ def test_mixed_override_scenarios(self): def complex_func(a, b: str) -> tuple[int, str]: return len(b), b.upper() - input_types, output_types = extract_function_data_types( + input_types, output_types = extract_function_typespecs( complex_func, ["length", "upper"], input_types={"a": float}, @@ -300,7 +300,7 @@ def test_generic_types(self): def generic_func(data: list[int]) -> dict[str, int]: return {str(i): i for i in data} - input_types, output_types = extract_function_data_types( + input_types, output_types = extract_function_typespecs( generic_func, ["mapping"] ) @@ -316,7 +316,7 @@ def list_func( return str(x), x # This tests the sequence detection logic - input_types, output_types = extract_function_data_types( + input_types, output_types = extract_function_typespecs( list_func, ["text", "number"] ) @@ -330,7 +330,7 @@ def collection_func(x: int) -> Collection[str]: return [str(x)] # Single output key with Collection type - input_types, output_types = extract_function_data_types( + input_types, output_types = extract_function_typespecs( collection_func, ["result"] ) @@ -347,7 +347,7 @@ def test_empty_function(self): def empty_func(): pass - input_types, output_types = extract_function_data_types(empty_func, []) + input_types, output_types = extract_function_typespecs(empty_func, []) assert input_types == {} assert output_types == {} @@ -364,7 +364,7 @@ class Container(Generic[T]): def generic_container_func(x: Container[int]) -> Container[str]: return Container() - input_types, output_types = extract_function_data_types( + input_types, output_types = extract_function_typespecs( generic_container_func, ["result"] ) @@ -377,7 +377,7 @@ def test_output_types_dict_partial_override(self): def three_output_func() -> tuple[int, str, float]: return 1, "hello", 3.14 - input_types, output_types = extract_function_data_types( + input_types, output_types = 
extract_function_typespecs( three_output_func, ["num", "text", "decimal"], output_types={"text": bytes}, # Override only middle one diff --git a/uv.lock b/uv.lock index 589ebc2..ba522ac 100644 --- a/uv.lock +++ b/uv.lock @@ -1230,7 +1230,7 @@ requires-dist = [ { name = "matplotlib", specifier = ">=3.10.3" }, { name = "networkx" }, { name = "pandas", specifier = ">=2.2.3" }, - { name = "polars", specifier = ">=1.30.0" }, + { name = "polars", specifier = ">=1.31.0" }, { name = "pyarrow", specifier = ">=20.0.0" }, { name = "pyyaml", specifier = ">=6.0.2" }, { name = "redis", marker = "extra == 'redis'", specifier = ">=6.2.0" }, @@ -1436,16 +1436,16 @@ wheels = [ [[package]] name = "polars" -version = "1.30.0" +version = "1.31.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/82/b6/8dbdf626c0705a57f052708c9fc0860ffc2aa97955930d5faaf6a66fcfd3/polars-1.30.0.tar.gz", hash = "sha256:dfe94ae84a5efd9ba74e616e3e125b24ca155494a931890a8f17480737c4db45", size = 4668318, upload-time = "2025-05-21T13:33:24.175Z" } +sdist = { url = "https://files.pythonhosted.org/packages/fd/f5/de1b5ecd7d0bd0dd87aa392937f759f9cc3997c5866a9a7f94eabf37cd48/polars-1.31.0.tar.gz", hash = "sha256:59a88054a5fc0135386268ceefdbb6a6cc012d21b5b44fed4f1d3faabbdcbf32", size = 4681224, upload-time = "2025-06-18T12:00:46.24Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/40/48/e9b2cb379abcc9f7aff2e701098fcdb9fe6d85dc4ad4cec7b35d39c70951/polars-1.30.0-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:4c33bc97c29b7112f0e689a2f8a33143973a3ff466c70b25c7fd1880225de6dd", size = 35704342, upload-time = "2025-05-21T13:32:22.996Z" }, - { url = "https://files.pythonhosted.org/packages/36/ca/f545f61282f75eea4dfde4db2944963dcd59abd50c20e33a1c894da44dad/polars-1.30.0-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:e3d05914c364b8e39a5b10dcf97e84d76e516b3b1693880bf189a93aab3ca00d", size = 32459857, upload-time = "2025-05-21T13:32:27.728Z" }, - { url = "https://files.pythonhosted.org/packages/76/20/e018cd87d7cb6f8684355f31f4e193222455a6e8f7b942f4a2934f5969c7/polars-1.30.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a52af3862082b868c1febeae650af8ae8a2105d2cb28f0449179a7b44f54ccf", size = 36267243, upload-time = "2025-05-21T13:32:31.796Z" }, - { url = "https://files.pythonhosted.org/packages/cb/e7/b88b973021be07b13d91b9301cc14392c994225ef5107a32a8ffd3fd6424/polars-1.30.0-cp39-abi3-manylinux_2_24_aarch64.whl", hash = "sha256:ffb3ef133454275d4254442257c5f71dd6e393ce365c97997dadeb6fa9d6d4b5", size = 33416871, upload-time = "2025-05-21T13:32:35.077Z" }, - { url = "https://files.pythonhosted.org/packages/dd/7c/d46d4381adeac537b8520b653dc30cb8b7edbf59883d71fbb989e9005de1/polars-1.30.0-cp39-abi3-win_amd64.whl", hash = "sha256:c26b633a9bd530c5fc09d317fca3bb3e16c772bd7df7549a9d8ec1934773cc5d", size = 36363630, upload-time = "2025-05-21T13:32:38.286Z" }, - { url = "https://files.pythonhosted.org/packages/fb/b5/5056d0c12aadb57390d0627492bef8b1abf3549474abb9ae0fd4e2bfa885/polars-1.30.0-cp39-abi3-win_arm64.whl", hash = "sha256:476f1bde65bc7b4d9f80af370645c2981b5798d67c151055e58534e89e96f2a8", size = 32643590, upload-time = "2025-05-21T13:32:42.107Z" }, + { url = "https://files.pythonhosted.org/packages/3d/6e/bdd0937653c1e7a564a09ae3bc7757ce83fedbf19da600c8b35d62c0182a/polars-1.31.0-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:ccc68cd6877deecd46b13cbd2663ca89ab2a2cb1fe49d5cfc66a9cef166566d9", size = 34511354, upload-time = "2025-06-18T11:59:40.048Z" }, 
+ { url = "https://files.pythonhosted.org/packages/77/fe/81aaca3540c1a5530b4bc4fd7f1b6f77100243d7bb9b7ad3478b770d8b3e/polars-1.31.0-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:a94c5550df397ad3c2d6adc212e59fd93d9b044ec974dd3653e121e6487a7d21", size = 31377712, upload-time = "2025-06-18T11:59:45.104Z" }, + { url = "https://files.pythonhosted.org/packages/b8/d9/5e2753784ea30d84b3e769a56f5e50ac5a89c129e87baa16ac0773eb4ef7/polars-1.31.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ada7940ed92bea65d5500ae7ac1f599798149df8faa5a6db150327c9ddbee4f1", size = 35050729, upload-time = "2025-06-18T11:59:48.538Z" }, + { url = "https://files.pythonhosted.org/packages/20/e8/a6bdfe7b687c1fe84bceb1f854c43415eaf0d2fdf3c679a9dc9c4776e462/polars-1.31.0-cp39-abi3-manylinux_2_24_aarch64.whl", hash = "sha256:b324e6e3e8c6cc6593f9d72fe625f06af65e8d9d47c8686583585533a5e731e1", size = 32260836, upload-time = "2025-06-18T11:59:52.543Z" }, + { url = "https://files.pythonhosted.org/packages/6e/f6/9d9ad9dc4480d66502497e90ce29efc063373e1598f4bd9b6a38af3e08e7/polars-1.31.0-cp39-abi3-win_amd64.whl", hash = "sha256:3fd874d3432fc932863e8cceff2cff8a12a51976b053f2eb6326a0672134a632", size = 35156211, upload-time = "2025-06-18T11:59:55.805Z" }, + { url = "https://files.pythonhosted.org/packages/40/4b/0673a68ac4d6527fac951970e929c3b4440c654f994f0c957bd5556deb38/polars-1.31.0-cp39-abi3-win_arm64.whl", hash = "sha256:62ef23bb9d10dca4c2b945979f9a50812ac4ace4ed9e158a6b5d32a7322e6f75", size = 31469078, upload-time = "2025-06-18T11:59:59.242Z" }, ] [[package]]