diff --git a/.cursor/rules/pr-comments.mdc b/.cursor/rules/pr-comments.mdc deleted file mode 120000 index 4b5e57d6..00000000 --- a/.cursor/rules/pr-comments.mdc +++ /dev/null @@ -1 +0,0 @@ -../../.ai/rules/pr-comments.mdc \ No newline at end of file diff --git a/.gitignore b/.gitignore index 62171e1d..8ec81799 100644 --- a/.gitignore +++ b/.gitignore @@ -28,3 +28,5 @@ packages/transloadit/README.md packages/transloadit/CHANGELOG.md packages/transloadit/LICENSE package.tgz +packages/mcp-server/.mcpregistry_github_token +packages/mcp-server/.mcpregistry_registry_token diff --git a/docs/fingerprint/transloadit-after.json b/docs/fingerprint/transloadit-after.json index cca93ce0..d4e5197d 100644 --- a/docs/fingerprint/transloadit-after.json +++ b/docs/fingerprint/transloadit-after.json @@ -1,5 +1,5 @@ { - "packageDir": "/home/kvz/code/node-sdk/packages/transloadit", + "packageDir": "packages/transloadit", "tarball": { "filename": "transloadit-4.1.2.tgz", "sizeBytes": 1110470, diff --git a/docs/fingerprint/transloadit-baseline.json b/docs/fingerprint/transloadit-baseline.json index 521052f5..e7f329a2 100644 --- a/docs/fingerprint/transloadit-baseline.json +++ b/docs/fingerprint/transloadit-baseline.json @@ -1,9 +1,9 @@ { - "packageDir": "/home/kvz/code/node-sdk/packages/transloadit", + "packageDir": "packages/transloadit", "tarball": { "filename": "transloadit-4.7.5.tgz", - "sizeBytes": 1250742, - "sha256": "195c48c7b93e44360d29e3c74d3dbb720503242123a7a57c3387000c71b72c1a" + "sizeBytes": 1329164, + "sha256": "0be45e33a585acb5648bea70b10ef6c8371b5952a890fe378739172338d43a03" }, "packageJson": { "name": "transloadit", @@ -33,8 +33,8 @@ }, { "path": "dist/alphalib/types/robots/ai-chat.js", - "sizeBytes": 9598, - "sha256": "450227323009921c8c436c4ca3d325846d4ccab7e47a5a29b94ebefd93170492" + "sizeBytes": 9661, + "sha256": "518b0b96173e89cc8f045a068b9ed080256d73f6bdbd486350773650026cbfc3" }, { "path": "dist/ApiError.js", @@ -48,8 +48,8 @@ }, { "path": 
"dist/cli/commands/assemblies.js", - "sizeBytes": 51217, - "sha256": "c368505ba2086dfbcc6148c5ac656a9ac228093cf8cebe95ba650f4dfe21592d" + "sizeBytes": 53557, + "sha256": "733b23ad1c8c65a9be218fdb0458d7b90da4ada9304198ab0935cf5f4b71f4ae" }, { "path": "dist/alphalib/types/assembliesGet.js", @@ -178,8 +178,8 @@ }, { "path": "dist/cli.js", - "sizeBytes": 1147, - "sha256": "4d52d0cea6f64abe67fd99d9bdf14dee38a51ee9c366eb45f110f38ab008f4dd" + "sizeBytes": 1219, + "sha256": "b959001b789f7ebd47577de52ea067ea8115a2710b678f53bd479fadaba75aed" }, { "path": "dist/alphalib/types/robots/cloudfiles-import.js", @@ -266,6 +266,11 @@ "sizeBytes": 1993, "sha256": "268fedb80d76c3a8406f6ea79ee453ce1b821dc9bdc2bc92977e429aeca9bcc2" }, + { + "path": "dist/ensureUniqueCounter.js", + "sizeBytes": 1431, + "sha256": "1066bcc2369c0c784d05428a1bb22b670b9be6241ddb8e1b69606c9ba3ed3266" + }, { "path": "dist/alphalib/types/robots/file-compress.js", "sizeBytes": 6014, @@ -316,6 +321,11 @@ "sizeBytes": 1228, "sha256": "474e8f93000f842761a1cebe9282c17eeba8c809f1d8ef25db026796edacbf89" }, + { + "path": "dist/cli/fileProcessingOptions.js", + "sizeBytes": 7306, + "sha256": "8291853fd88b397a758b81053668dfb728d07312840657c43af15b7434087901" + }, { "path": "dist/alphalib/types/robots/ftp-import.js", "sizeBytes": 2406, @@ -326,6 +336,11 @@ "sizeBytes": 3534, "sha256": "c4bd648bb097acadbc349406192105367b9d94c516700b99c9f4d7a4b6c7a6f0" }, + { + "path": "dist/cli/generateIntentDocs.js", + "sizeBytes": 11641, + "sha256": "b9b9bf05020ff6c452c3c3fd8b878fba73d4d49cf6ba77714ec0cfad6e763c17" + }, { "path": "dist/alphalib/types/robots/google-import.js", "sizeBytes": 3748, @@ -391,6 +406,11 @@ "sizeBytes": 27934, "sha256": "7683dca61e77618aad347431b7693fac282d208526dde351ba86387a53c962f4" }, + { + "path": "dist/cli/semanticIntents/imageDescribe.js", + "sizeBytes": 7347, + "sha256": "1ac1b94f250a4c9cf8b37cffaebe49ed0a994ad622a390cf967e9f5858fc13a1" + }, { "path": "dist/InconsistentResponseError.js", "sizeBytes": 158, @@ 
-398,13 +418,43 @@ }, { "path": "dist/cli/commands/index.js", - "sizeBytes": 2145, - "sha256": "b44764be9d6a803669bbc1a937f553566ce91993ed283c7f6d5ef65cbff6b263" + "sizeBytes": 2310, + "sha256": "a60cff637a0113cfbbf64ff93eb3fa00da81119dae258555326b9cde940556dc" + }, + { + "path": "dist/cli/semanticIntents/index.js", + "sizeBytes": 712, + "sha256": "b7edabdaa145ba3ebb0e785290303cdceb954645e146855585456e4cf6fafcda" }, { "path": "dist/inputFiles.js", - "sizeBytes": 7836, - "sha256": "1d77d129abc1b11be894d1cf6c34afc93370165e39871d6d5b672c058d1a0489" + "sizeBytes": 14263, + "sha256": "a7b721275494cf4abc2d86ef85fd080a94e9a6d35d58e219d43f72d568a87c0d" + }, + { + "path": "dist/cli/intentCommands.js", + "sizeBytes": 13030, + "sha256": "25599f79eab593c96a55ca216bdb46ae02f8a45f6abe66ba79c7c747db2adfaf" + }, + { + "path": "dist/cli/intentCommandSpecs.js", + "sizeBytes": 7079, + "sha256": "f80fb2fda1b9c0c1cb49f132a5a03aeec51c91961d54973f117403a18547407e" + }, + { + "path": "dist/cli/intentFields.js", + "sizeBytes": 9567, + "sha256": "5a35c9900a09529475d844144658bd801d0d5c50ef95eee0310947b72d6d16a0" + }, + { + "path": "dist/cli/intentInputPolicy.js", + "sizeBytes": 56, + "sha256": "f2dfdc05ddec25bf8ae63448d8e562ff7ba6ec3b17b4ea4be0adb151017c5991" + }, + { + "path": "dist/cli/intentRuntime.js", + "sizeBytes": 16866, + "sha256": "1b3f1cd84f162f33e60b7416a4f0be0cbab9d5840ed5f44bec4d7d239e4e9eeb" }, { "path": "dist/lintAssemblyInput.js", @@ -416,6 +466,11 @@ "sizeBytes": 1546, "sha256": "561ba7f86c96d2481fc21ae81be056adf34af5b6deea434f0993b889927fbf89" }, + { + "path": "dist/cli/semanticIntents/markdownPdf.js", + "sizeBytes": 3562, + "sha256": "ddbba834eeb5c592c44a525781ae951ea5e9e3588b855161d42c1a42730b18e3" + }, { "path": "dist/alphalib/mcache.js", "sizeBytes": 5145, @@ -466,6 +521,11 @@ "sizeBytes": 1391, "sha256": "7a9f0562b680fef9312a59b5ac88d61e9c8abeee903a4a42ffca3b39d1a59b06" }, + { + "path": "dist/cli/semanticIntents/parsing.js", + "sizeBytes": 1021, + "sha256": 
"eb1493ecf0626b334a038603bd773a78310a20adbda39306738cc7bc4e03b9ab" + }, { "path": "dist/PollingTimeoutError.js", "sizeBytes": 172, @@ -476,6 +536,16 @@ "sizeBytes": 935, "sha256": "e01935073eab55214d9e37fa2d25e5615368efb8e9e2aedfa7a765e0d6e2bd84" }, + { + "path": "dist/cli/resultFiles.js", + "sizeBytes": 1839, + "sha256": "32e08477f67770ecbd65f63f1ecd05ce8eabc85caab495253c3319a6d4c8da33" + }, + { + "path": "dist/cli/resultUrls.js", + "sizeBytes": 1472, + "sha256": "745230607982ef264d4c59994748a0667b5f4ad82306864643cf9975a3c0209a" + }, { "path": "dist/robots.js", "sizeBytes": 8374, @@ -526,6 +596,11 @@ "sizeBytes": 43744, "sha256": "f7132f0384bd0f88787ca452f9145736ffbecf5a0975ee7bd1bd811aa4dde7f6" }, + { + "path": "dist/cli/stepsInput.js", + "sizeBytes": 947, + "sha256": "f491af528805b3a53f827f369202a05ff9a2f98d370be2d66d079679b7bcf6ee" + }, { "path": "dist/alphalib/types/robots/supabase-import.js", "sizeBytes": 4131, @@ -568,8 +643,8 @@ }, { "path": "dist/cli/commands/templates.js", - "sizeBytes": 17507, - "sha256": "632108b45ea9db0807be32316f73e71425134754a0657e83675287eefc777de8" + "sizeBytes": 16974, + "sha256": "f2ff5967c7f316469e53ad04337c03cc394837e4c457829bbc167755d30c1d4c" }, { "path": "dist/alphalib/types/robots/text-speak.js", @@ -599,7 +674,7 @@ { "path": "dist/Transloadit.js", "sizeBytes": 37922, - "sha256": "500d82f5b654da175e301294540522718b2a81e15d87c3cd365f074fe961a769" + "sha256": "da28e944dd0a9cadb5a2cecdb2d859a639a53abd4d45489fab02069115918b6a" }, { "path": "dist/alphalib/tryCatch.js", @@ -698,8 +773,8 @@ }, { "path": "package.json", - "sizeBytes": 2730, - "sha256": "313dd2ac13d3e4857b71bd889b2c9fa7f2458cf2bf5be2dd5a1996eb3d23199d" + "sizeBytes": 2855, + "sha256": "bf0b13fc2703400268108db330445563468f10088f9001eb884afd34a8e79522" }, { "path": "dist/alphalib/types/robots/_index.d.ts.map", @@ -724,12 +799,12 @@ { "path": "dist/alphalib/types/robots/ai-chat.d.ts.map", "sizeBytes": 3222, - "sha256": 
"349f27bcb874de663e0f67dc560546968ca50da02aab01eea7c8225400bca5f6" + "sha256": "0e23d607129767f18eb92b4f907a23f299234bceab87e651eab89209c284b42e" }, { "path": "dist/alphalib/types/robots/ai-chat.js.map", - "sizeBytes": 7650, - "sha256": "6c31c017e41a533499a0146d9ff4bd692a91d629ca2ee60e2c7d2ff88194f43b" + "sizeBytes": 7713, + "sha256": "9f80c0fbe2a754c9d659bb166393a8216e68d1007c91295724e1405ee6b8bf92" }, { "path": "dist/ApiError.d.ts.map", @@ -753,13 +828,13 @@ }, { "path": "dist/cli/commands/assemblies.d.ts.map", - "sizeBytes": 3737, - "sha256": "e659be90cee8252d9fa4a5db72cf3d48d2548d0f5a716368cc024f7ed1e4b222" + "sizeBytes": 3983, + "sha256": "a4b8d3a3c9c3758a06c982b0654cffac60ce26a3fb3156466ba7128027d0db96" }, { "path": "dist/cli/commands/assemblies.js.map", - "sizeBytes": 44866, - "sha256": "8bc2496707790b60dfde07065b6df6adc7152d04e999ed4c84f1993eaeadc28f" + "sizeBytes": 48725, + "sha256": "8d1c836fe4e8af2f0fff3e0d750ad763c56ef1c4cd2f6140a4f4a7e13560225f" }, { "path": "dist/alphalib/types/assembliesGet.d.ts.map", @@ -1013,13 +1088,13 @@ }, { "path": "dist/cli.d.ts.map", - "sizeBytes": 278, - "sha256": "5e6f1a916256a81fdc3e6678644c191a87bf1bbcb273e0e256a1e04533c045cd" + "sizeBytes": 293, + "sha256": "a9194c2c071b9b11546084324533c30a9188733778b0318be50f6a0f1917b6ae" }, { "path": "dist/cli.js.map", - "sizeBytes": 1335, - "sha256": "aa838fe53a894d7c2eca041e15963a3ddb50e3bf127911f472da237aec07ae56" + "sizeBytes": 1408, + "sha256": "00a1c4a99a63ed2b06d9529979d476ffacc8594bad5d891e6a2245fabd0fdfea" }, { "path": "dist/alphalib/types/robots/cloudfiles-import.d.ts.map", @@ -1191,6 +1266,16 @@ "sizeBytes": 1366, "sha256": "bf5b2ae88cc181f02afaf1ed026a327b8f1d3b4cec26ee2b87cb63d7dee4f17d" }, + { + "path": "dist/ensureUniqueCounter.d.ts.map", + "sizeBytes": 496, + "sha256": "f66924841143d5292339507d752fefc7d745c5979514f956c67ad70e490e1816" + }, + { + "path": "dist/ensureUniqueCounter.js.map", + "sizeBytes": 1452, + "sha256": 
"3e92164cbbc5518aad63241470f63338057435b549d6f55233301e8b5ae1b721" + }, { "path": "dist/alphalib/types/robots/file-compress.d.ts.map", "sizeBytes": 1263, @@ -1291,6 +1376,16 @@ "sizeBytes": 1017, "sha256": "6583f0e6b3a04b39758bc60bbd77383f00715365ac714be95b871ba6797050b9" }, + { + "path": "dist/cli/fileProcessingOptions.d.ts.map", + "sizeBytes": 1573, + "sha256": "07e973b460ec4f6bc33c847caff81c037bad2c0218cfd45840fd560cf3347056" + }, + { + "path": "dist/cli/fileProcessingOptions.js.map", + "sizeBytes": 5543, + "sha256": "780b8f8b4a1368b11e99159994b39670e77fcfe1dbd2490600bb2e63f62b1587" + }, { "path": "dist/alphalib/types/robots/ftp-import.d.ts.map", "sizeBytes": 976, @@ -1311,6 +1406,16 @@ "sizeBytes": 2145, "sha256": "ce1bf48c1cc713ae843061cba3c3b119475baa5cb6b62ac4b575e50b297bcf71" }, + { + "path": "dist/cli/generateIntentDocs.d.ts.map", + "sizeBytes": 137, + "sha256": "02ea53975f9b3a23e1a818db4c3b755229e06ecf2ae838ff8b5fe672b3127bb3" + }, + { + "path": "dist/cli/generateIntentDocs.js.map", + "sizeBytes": 10178, + "sha256": "7871db734d48209977ffbff0f64810ba3d7860700cab6a48a1917fade2731575" + }, { "path": "dist/alphalib/types/robots/google-import.d.ts.map", "sizeBytes": 960, @@ -1441,6 +1546,16 @@ "sizeBytes": 9404, "sha256": "655db1c155f512b6b8b9fa21e8a6150ecab07e3b366e186694ef2b289f04b688" }, + { + "path": "dist/cli/semanticIntents/imageDescribe.d.ts.map", + "sizeBytes": 369, + "sha256": "f19041239dc6a3a59cba7469b24557feae411c6584483991b92f555dd2c7f76a" + }, + { + "path": "dist/cli/semanticIntents/imageDescribe.js.map", + "sizeBytes": 4469, + "sha256": "dd5584031d838472d9f2479e63bb437188ca674fa83b81ad948a576d1b52369b" + }, { "path": "dist/InconsistentResponseError.d.ts.map", "sizeBytes": 208, @@ -1454,22 +1569,82 @@ { "path": "dist/cli/commands/index.d.ts.map", "sizeBytes": 198, - "sha256": "3f955192e7d7832d6fd0c8ee0244b153e42c947686425750c7c8c58d6657f2a7" + "sha256": "b9e290d2d6c1c22f6396324b843032e88c2360d1d18fe45417519d1976df6a1e" + }, + { + "path": 
"dist/cli/semanticIntents/index.d.ts.map", + "sizeBytes": 844, + "sha256": "ba2f0a7b3f8b46fc34a5f42c10ba3aec67f9fd6ad46da5716a3b076977815a9d" }, { "path": "dist/cli/commands/index.js.map", - "sizeBytes": 1940, - "sha256": "1cad8333ee5fd6c34071a6d8528a7b55399be0626baf1754e28453d714836868" + "sizeBytes": 2088, + "sha256": "0c6068340d4cb461b7512bb30a77a6aad97a694fadab9c5a865d5cd52bd9f941" + }, + { + "path": "dist/cli/semanticIntents/index.js.map", + "sizeBytes": 564, + "sha256": "b2fb3dc5c6996eec5f528988648282e02a7b69c5aae556d354486e08f2197f9e" }, { "path": "dist/inputFiles.d.ts.map", - "sizeBytes": 1438, - "sha256": "ac8a1b3b69cfd346810bd841eb66bc8b61788a56ba75c1149dc7fba5757009b0" + "sizeBytes": 1772, + "sha256": "78a0f3fa2436ceef34d0b5c6a6a19f2d9dd66d255db7d93b552fc2e0575e496c" }, { "path": "dist/inputFiles.js.map", - "sizeBytes": 8595, - "sha256": "fa96090c58247759bef9b7767bd4b4f474bba332ee5a6edf0429e89e99a0c25c" + "sizeBytes": 14889, + "sha256": "376dc151d3746dbe074a72af20f6ded163ae237f3097fa125f1a1e4ba67c91f4" + }, + { + "path": "dist/cli/intentCommands.d.ts.map", + "sizeBytes": 546, + "sha256": "73d5f4fc9872f6119a1064f9767aa64da13524b4db8a1327d5bc6377a3e5f649" + }, + { + "path": "dist/cli/intentCommands.js.map", + "sizeBytes": 11291, + "sha256": "b4dfeb39c1007765bdbd79adea53614136aeb9f9b2a3e34273eae2db1db3b5ff" + }, + { + "path": "dist/cli/intentCommandSpecs.d.ts.map", + "sizeBytes": 1276, + "sha256": "dd3fa43dbe3163e9b1523d46204f9462a87dcfb57359b8f08a895eb65b52b85b" + }, + { + "path": "dist/cli/intentCommandSpecs.js.map", + "sizeBytes": 5294, + "sha256": "3779e0d063e2751c4ea318fbd00a5c9dbf3641f59eb9695372bff405cf830e06" + }, + { + "path": "dist/cli/intentFields.d.ts.map", + "sizeBytes": 1034, + "sha256": "631fe19761406fb6fda4a737bc9c5249f06fd67c98779b7c51273e67dd994f94" + }, + { + "path": "dist/cli/intentFields.js.map", + "sizeBytes": 9386, + "sha256": "1777af4630ca6d645990bfc0003ea5f1dbce28dc722e5bdfc50da71ba7101e4e" + }, + { + "path": 
"dist/cli/intentInputPolicy.d.ts.map", + "sizeBytes": 346, + "sha256": "a4d49f03eba0c6811f065f0048f3f3efa454f32eee70050ce598e180d50827db" + }, + { + "path": "dist/cli/intentInputPolicy.js.map", + "sizeBytes": 133, + "sha256": "3f85c00a0565c65820326f2e6c694648153782cce52bb6b806dd4a68896669b1" + }, + { + "path": "dist/cli/intentRuntime.d.ts.map", + "sizeBytes": 4152, + "sha256": "a417392d5364cd67c7a943ad30434dde82bdde8ff306c8884c7070d850d0611e" + }, + { + "path": "dist/cli/intentRuntime.js.map", + "sizeBytes": 14344, + "sha256": "36d1a03b18143667a9bf96edbd32372f734be9f3a5e275ad714f9e53a037ca68" }, { "path": "dist/lintAssemblyInput.d.ts.map", @@ -1491,6 +1666,16 @@ "sizeBytes": 1530, "sha256": "5aa4eee76ca12657a5c14abbe32c9b7d215bb641cde96da62695c290bee18e12" }, + { + "path": "dist/cli/semanticIntents/markdownPdf.d.ts.map", + "sizeBytes": 259, + "sha256": "f78aca0880c733033c6228b5f7af830ff8e28d80986ad8a2636cfc97fa50abfb" + }, + { + "path": "dist/cli/semanticIntents/markdownPdf.js.map", + "sizeBytes": 2115, + "sha256": "f5c08b48224d809c8859deb39f41c737dee71dc18930e3a94da6c0c9c3cf805f" + }, { "path": "dist/alphalib/mcache.d.ts.map", "sizeBytes": 968, @@ -1591,6 +1776,16 @@ "sizeBytes": 1478, "sha256": "e8f211d2956724af6936dfa9eb715a813925e43646136adf0e3da9a185594361" }, + { + "path": "dist/cli/semanticIntents/parsing.d.ts.map", + "sizeBytes": 545, + "sha256": "f6013135ed3fd450adab752591558b4e534abdda8d5feef4cefbf5f7b0d07ec3" + }, + { + "path": "dist/cli/semanticIntents/parsing.js.map", + "sizeBytes": 1153, + "sha256": "ca8ee3e14a26b695738bfe15c50dbfd0d397236ea0664e086dc69fec8dbc7bc2" + }, { "path": "dist/PollingTimeoutError.d.ts.map", "sizeBytes": 213, @@ -1611,6 +1806,26 @@ "sizeBytes": 854, "sha256": "c743fb4ea5217d34ff665926bd14ecbb259dec99c2de862abfe787ece58817a0" }, + { + "path": "dist/cli/resultFiles.d.ts.map", + "sizeBytes": 632, + "sha256": "03b62467c4747a1b902bf1becfa2cbf2adac2a7f116bc676311ce5317920c940" + }, + { + "path": "dist/cli/resultFiles.js.map", + 
"sizeBytes": 1969, + "sha256": "dd5c9fc2f547e875d4b51bbaf101b523e4006831e8512412b06368412b0975cf" + }, + { + "path": "dist/cli/resultUrls.d.ts.map", + "sizeBytes": 767, + "sha256": "86344c746e11849ae8063b6795d5a043ea57e6b482eeaefc2d166bf364277a8c" + }, + { + "path": "dist/cli/resultUrls.js.map", + "sizeBytes": 2000, + "sha256": "98b07c0bdf4a1402a7c3b8ed8968a4f18bba6cebff5adc1c844d9753386e59e3" + }, { "path": "dist/robots.d.ts.map", "sizeBytes": 1181, @@ -1711,6 +1926,16 @@ "sizeBytes": 41144, "sha256": "dc79c2623b6a27419f28023555ee9895ceeb0a387d0ac641f774090dcbc2c6fb" }, + { + "path": "dist/cli/stepsInput.d.ts.map", + "sizeBytes": 294, + "sha256": "b5b968d0ff47f7d6db5e08d6807c0dc37310c1bc02c04240222af0dd453ad860" + }, + { + "path": "dist/cli/stepsInput.js.map", + "sizeBytes": 1111, + "sha256": "8fe4317f1a79192083d3a01f9c7dbbed5b629b334fc1e6e20154b3c4b70e579d" + }, { "path": "dist/alphalib/types/robots/supabase-import.d.ts.map", "sizeBytes": 1036, @@ -1794,12 +2019,12 @@ { "path": "dist/cli/commands/templates.d.ts.map", "sizeBytes": 2386, - "sha256": "7561c84233a0db5dd6e75d2a49d434e6455e8964a96d997bfc712f089551b041" + "sha256": "96c5a9fd931ca11a188642835b6251621a9789e87dda53c25ea51fa075de7f96" }, { "path": "dist/cli/commands/templates.js.map", - "sizeBytes": 15860, - "sha256": "0c5b3ad523af04aec5a57deb83c7681c5f3ddd8d241664ce6f43c71e10769e04" + "sizeBytes": 15413, + "sha256": "ab60882bcfd691ec1dc5682b7d3beb1737cab4d1f942b37264ba6236d080cd60" }, { "path": "dist/alphalib/types/robots/text-speak.d.ts.map", @@ -1854,12 +2079,12 @@ { "path": "dist/Transloadit.d.ts.map", "sizeBytes": 6679, - "sha256": "ee51b85a546a35f49fd8512705d9bd090d704edd94757ed6f457b882e9bc2396" + "sha256": "319e3cf611757159752a324d59ca0f6fa02a8218e32e61c8ffb103764812a9e0" }, { "path": "dist/Transloadit.js.map", "sizeBytes": 27586, - "sha256": "9fd1ee82626e9e2452ec799d3a8ae775f4a7c1fd9b99d9703f7e3e2bd0b3d191" + "sha256": "409d5759a0e57719a00e5ab6314a89a49aa083e6bc335078e4e12fb9c046a41c" }, { "path": 
"dist/alphalib/tryCatch.d.ts.map", @@ -2053,8 +2278,8 @@ }, { "path": "README.md", - "sizeBytes": 36476, - "sha256": "62cf02f92243b72419d266b5e94adc7f06cbf55fc6155c5ecf67115afdc47635" + "sizeBytes": 81217, + "sha256": "4c190c948f0dbdc521fd2163675df8da755274846f73106721b352694c3e2914" }, { "path": "dist/alphalib/types/robots/_index.d.ts", @@ -2083,8 +2308,8 @@ }, { "path": "src/alphalib/types/robots/ai-chat.ts", - "sizeBytes": 10448, - "sha256": "4140d851971693e1aebdc675e71ffcdc98e7f9b31fcfa3c86f1ef8a6a5a8142b" + "sizeBytes": 10509, + "sha256": "afe48e03cd1a3eb5544d1d4dd8fc4fb782dff46fc38cc89a25db8eaee2ffdf3e" }, { "path": "dist/ApiError.d.ts", @@ -2108,13 +2333,13 @@ }, { "path": "dist/cli/commands/assemblies.d.ts", - "sizeBytes": 4342, - "sha256": "df6486047bbd89862b7cb433d05f63a128c1fad4520df978842adcecd4f17503" + "sizeBytes": 4598, + "sha256": "2bfe2853f48b9a9e828d909e4898b5ce39bee0ed8e80bf2bd188dac8fe5f424b" }, { "path": "src/cli/commands/assemblies.ts", - "sizeBytes": 50948, - "sha256": "d2a9de8dbd22233785a9880537ece31c0123b1959a24048b50b87c8a759db10e" + "sizeBytes": 55554, + "sha256": "cbafba38543a1cc993d9d29b8ac26a3fc221a04662fae3e358d5ee2f7b0b6a43" }, { "path": "dist/alphalib/types/assembliesGet.d.ts", @@ -2368,13 +2593,13 @@ }, { "path": "dist/cli.d.ts", - "sizeBytes": 256, - "sha256": "c0b85d46fb05f111ab4b71bf0adc491e71b78efd5b5344b74599e4126477979b" + "sizeBytes": 265, + "sha256": "84c403d5b19a2a87189fdf87a6a3b9d4f9dc23ff497f55ebacce6b72669adf8e" }, { "path": "src/cli.ts", - "sizeBytes": 1101, - "sha256": "9f7fa1f5565e87ffdf37abd416e6e77661d3cdba15513ae37fc9a5952a24abc0" + "sizeBytes": 1170, + "sha256": "757c3922b27c1d9c7fb2a496a66be1af298ed86b3e492fed6f43f7f08db1c8e0" }, { "path": "dist/alphalib/types/robots/cloudfiles-import.d.ts", @@ -2546,6 +2771,16 @@ "sizeBytes": 2785, "sha256": "40d7a8b6567fd80057781d7a24f093e9e1918ee45c3191085ee1bc256f9eeb44" }, + { + "path": "dist/ensureUniqueCounter.d.ts", + "sizeBytes": 350, + "sha256": 
"adc53cdb89d6f8560f7c632422fe7afa8b6d62e3cd33c7fa8645be7c3d4b193d" + }, + { + "path": "src/ensureUniqueCounter.ts", + "sizeBytes": 1646, + "sha256": "adce7911d379aa83abead36dc209293110fe475b691092ea7ec63d704c40f7df" + }, { "path": "dist/alphalib/types/robots/file-compress.d.ts", "sizeBytes": 20135, @@ -2646,6 +2881,16 @@ "sizeBytes": 2068, "sha256": "08af2039f3e568d27b91508b8002ce2ee19714817d69360a4e942cf27f820657" }, + { + "path": "dist/cli/fileProcessingOptions.d.ts", + "sizeBytes": 2885, + "sha256": "805e3582fd29fb1d0e00343150f3ecb06aa53194d289ce1d5ae31632fb11d80b" + }, + { + "path": "src/cli/fileProcessingOptions.ts", + "sizeBytes": 8747, + "sha256": "742f0c0acfd7c45fb9f69448fcb805e21edf7998a97b81400cc5d35cb3223d86" + }, { "path": "dist/alphalib/types/robots/ftp-import.d.ts", "sizeBytes": 10382, @@ -2666,6 +2911,16 @@ "sizeBytes": 4197, "sha256": "1bbaa2361cc3675a29178cbd0f4fcecaad1033032f154a6da36c5c677a9c9447" }, + { + "path": "dist/cli/generateIntentDocs.d.ts", + "sizeBytes": 59, + "sha256": "62a1df25d0d6a23b5c59ea877104bd2633759d655e526f1d8be6dde068dca46e" + }, + { + "path": "src/cli/generateIntentDocs.ts", + "sizeBytes": 12046, + "sha256": "4e85ded91f8ffa1ce8321d691b7b7941b916313266c3346dc22d23c8178bf59f" + }, { "path": "dist/alphalib/types/robots/google-import.d.ts", "sizeBytes": 9781, @@ -2796,6 +3051,16 @@ "sizeBytes": 28301, "sha256": "23a89aaa7f7e7721eac3e7dafb1f82fdb5c1277dc30d85cf3e3364c478e45151" }, + { + "path": "dist/cli/semanticIntents/imageDescribe.d.ts", + "sizeBytes": 1971, + "sha256": "c9f9f5d960aa948ce6ee36c7b617cbd157082b581d5a280528e634bc812e170e" + }, + { + "path": "src/cli/semanticIntents/imageDescribe.ts", + "sizeBytes": 8077, + "sha256": "8651a889a6347ca3395d4559bed5c53930450e62c97a28849d7e72f3f3982054" + }, { "path": "dist/InconsistentResponseError.d.ts", "sizeBytes": 138, @@ -2811,20 +3076,80 @@ "sizeBytes": 110, "sha256": "8138bd76ab0a7ad7dc62b74d654fd7335de2fa86e1fb58f34788df74005ccc2d" }, + { + "path": 
"dist/cli/semanticIntents/index.d.ts", + "sizeBytes": 904, + "sha256": "de820295e77eb98c6aba606d3bcfe8fe81fac231eff1eda173696bbe4bf29243" + }, { "path": "src/cli/commands/index.ts", - "sizeBytes": 2044, - "sha256": "b6752fa800c6a91e662b75a0c0973f0ba513f263d4a96d5e46a0d3e1f1a9f828" + "sizeBytes": 2195, + "sha256": "3a4b178bd5147f2621f7daa679081550ca410dc1c3e003d7bb9c55e525d05a7a" + }, + { + "path": "src/cli/semanticIntents/index.ts", + "sizeBytes": 1495, + "sha256": "63bd75a504db79877cd581c9404b8ad89f96242ece6edd02512f7203f8bf59d7" }, { "path": "dist/inputFiles.d.ts", - "sizeBytes": 1294, - "sha256": "dd490923c8af01790b1a7c72cd6578312a0af78ee035cc5fca55e24738d87fc1" + "sizeBytes": 1626, + "sha256": "d0bdfc4f2deca766146132c17e39c9457237d06db69ee13cffcd672c9ccc64c3" }, { "path": "src/inputFiles.ts", - "sizeBytes": 8411, - "sha256": "0df54cb83ac5c718f3d3f78ffb77a31d485e2ab5f0a9d91b4f64852e72d1a589" + "sizeBytes": 16515, + "sha256": "d9a6e9672639c307af6d2081dcbc2afa69789bf94b86a7336a63a0a5091c9f05" + }, + { + "path": "dist/cli/intentCommands.d.ts", + "sizeBytes": 716, + "sha256": "8b475be91c4bd98108fe0901ddcc9f6b6ef6f2efcd3056086e857e8fc99f0dc0" + }, + { + "path": "src/cli/intentCommands.ts", + "sizeBytes": 14926, + "sha256": "3c438cbadfdbbc4fc6b3c4bccc99dbdff443684d2a1fd4ccf85a9602560ff149" + }, + { + "path": "dist/cli/intentCommandSpecs.d.ts", + "sizeBytes": 1548, + "sha256": "ab4516be3b7a30603d7f88c22d783f6ecc99c70f3a045655f96d14a03022cd44" + }, + { + "path": "src/cli/intentCommandSpecs.ts", + "sizeBytes": 7910, + "sha256": "ee7f5b712b51821894001b39327db289dc770fb1aaa24f21bd0069d6fe174b7b" + }, + { + "path": "dist/cli/intentFields.d.ts", + "sizeBytes": 1018, + "sha256": "ed26c725a670f6c1b25c69863781aa1e423b9d4759fd4198dcf42b0fcbadd2bf" + }, + { + "path": "src/cli/intentFields.ts", + "sizeBytes": 9355, + "sha256": "d104d57e54c3e17ad297449318a687c66d55089d9b2d8da279fbf20be5832a10" + }, + { + "path": "dist/cli/intentInputPolicy.d.ts", + "sizeBytes": 333, + "sha256": 
"d44f15f350569ae0cce2ab042d52a086870d9cdfac36ddc8b10fa64f1c20ec3b" + }, + { + "path": "src/cli/intentInputPolicy.ts", + "sizeBytes": 275, + "sha256": "915772425ea5a963f79b42c13d95077733ea173910e0156a3b93964714c52ead" + }, + { + "path": "dist/cli/intentRuntime.d.ts", + "sizeBytes": 5649, + "sha256": "5b702a47758cf6f43fbd8c2f90f9cecc15c7c80ce0cc69abb4ed377d044c1ee0" + }, + { + "path": "src/cli/intentRuntime.ts", + "sizeBytes": 20973, + "sha256": "d7e01809348fbf78f710eb0597b03e0e68507130c8b85bbd369649b96da3841d" }, { "path": "src/alphalib/typings/json-to-ast.d.ts", @@ -2851,6 +3176,16 @@ "sizeBytes": 2512, "sha256": "a6654b2dfc145fece2f4d2881a46e043187a5ada28b4eee52ea577666404b018" }, + { + "path": "dist/cli/semanticIntents/markdownPdf.d.ts", + "sizeBytes": 270, + "sha256": "17e49653f7ffff068fe48b06da554fcb39ed772ac1fad27602c849d810d5419c" + }, + { + "path": "src/cli/semanticIntents/markdownPdf.ts", + "sizeBytes": 3729, + "sha256": "f490142c883fcc314e03f677140f5d8b1476ca6581446c04c540b334a62e35a0" + }, { "path": "dist/alphalib/mcache.d.ts", "sizeBytes": 1881, @@ -2951,6 +3286,16 @@ "sizeBytes": 1505, "sha256": "43cc950855aa6e24d9a4105a3b9e2afcbeac8477797ff5d46bb9e9a6c8adacf1" }, + { + "path": "dist/cli/semanticIntents/parsing.d.ts", + "sizeBytes": 466, + "sha256": "85271f8ec364b5ac2f4c7409c2d1d4ed70caeebc0aa152cc6de9038b383d4b60" + }, + { + "path": "src/cli/semanticIntents/parsing.ts", + "sizeBytes": 1240, + "sha256": "b4e8d53cebe0e9f497e11bb03382af573d9e1a28745a13f919af40d5d77d7b39" + }, { "path": "dist/PollingTimeoutError.d.ts", "sizeBytes": 144, @@ -2971,6 +3316,26 @@ "sizeBytes": 1325, "sha256": "0591686d6c3787e0af4821649506d88034d3f302b021969dc91d612f7e9b3e8b" }, + { + "path": "dist/cli/resultFiles.d.ts", + "sizeBytes": 583, + "sha256": "da44f8a6f5f49b02e312ccd593a1077790fc023c0f1c733f25b4a23370209bd7" + }, + { + "path": "src/cli/resultFiles.ts", + "sizeBytes": 2294, + "sha256": "04d97b91032341c07788a07cfc3e31087bc852bed03e111169b9451fb5a6d641" + }, + { + "path": 
"dist/cli/resultUrls.d.ts", + "sizeBytes": 776, + "sha256": "b68efc4140cb3674d10c47bdc0e7ebd2fd2c1f2504661c6daad8829d8267deb3" + }, + { + "path": "src/cli/resultUrls.ts", + "sizeBytes": 1860, + "sha256": "230318a00385b69090feca3b57a286c7c29eaedc22c83db3b181ce3bffab8bf1" + }, { "path": "dist/robots.d.ts", "sizeBytes": 1050, @@ -3071,6 +3436,16 @@ "sizeBytes": 43731, "sha256": "c4f7bf2fcc33a453676b2bc96d6f6ff509be46b70fbf772de597cb8e3c40de21" }, + { + "path": "dist/cli/stepsInput.d.ts", + "sizeBytes": 262, + "sha256": "9ac695030494a59b06198beeed3d6ee1d7035703973f4ea0d48b04fcaf80acbb" + }, + { + "path": "src/cli/stepsInput.ts", + "sizeBytes": 1118, + "sha256": "3445d96c05ded3350346afd74f9a8b6082aebfd86a26b4be93058f376b900663" + }, { "path": "dist/alphalib/types/robots/supabase-import.d.ts", "sizeBytes": 12627, @@ -3158,8 +3533,8 @@ }, { "path": "src/cli/commands/templates.ts", - "sizeBytes": 17942, - "sha256": "c592539b044992c343abc91ef53fc9a2b5acbe0f8720ef525c424961df4e1975" + "sizeBytes": 17465, + "sha256": "ce9b61226de00d1e2b22bd31990b2c9ff506d2b1d97ff289032bc5f2ee3ca23c" }, { "path": "dist/alphalib/types/robots/text-speak.d.ts", @@ -3214,12 +3589,12 @@ { "path": "dist/Transloadit.d.ts", "sizeBytes": 12397, - "sha256": "b1e9233014c13c47832c7fb8b2c82bc75e1b3519f259b3ce71f9bd6d8150f36d" + "sha256": "b5d21acd74ea575bc5c9820ba48d736cd0f44a025f4981aa22d4085007fdf736" }, { "path": "src/Transloadit.ts", "sizeBytes": 42665, - "sha256": "d8a3d50a5f245e79258bada7ca39cc9aaedbe430b521145c819b0d46d3fcb1bf" + "sha256": "c6fc410d37595c38306b6e73ca5ff7aa3ea56a2571f23f6800c4f46875df87e4" }, { "path": "dist/alphalib/tryCatch.d.ts", diff --git a/docs/fingerprint/transloadit-baseline.package.json b/docs/fingerprint/transloadit-baseline.package.json index b1621636..5fdcc4f0 100644 --- a/docs/fingerprint/transloadit-baseline.package.json +++ b/docs/fingerprint/transloadit-baseline.package.json @@ -36,6 +36,7 @@ "@aws-sdk/s3-request-presigner": "^3.891.0", "@transloadit/sev-logger": 
"^0.1.9", "@transloadit/utils": "^4.3.0", + "cacheable-lookup": "^7.0.0", "clipanion": "^4.0.0-rc.4", "debug": "^4.4.3", "dotenv": "^17.2.3", @@ -70,19 +71,20 @@ "src": "./src" }, "scripts": { - "check": "yarn lint:ts && yarn fix && yarn test:unit", + "check": "yarn sync:intent-docs && yarn lint:ts && yarn test:unit", + "sync:intent-docs": "node src/cli/generateIntentDocs.ts", "fix:js": "biome check --write .", "lint:ts": "yarn --cwd ../.. tsc:node", "fix:js:unsafe": "biome check --write . --unsafe", "lint:js": "biome check .", - "lint": "npm-run-all --parallel 'lint:js'", - "fix": "npm-run-all --serial 'fix:js'", + "lint": "yarn lint:js", + "fix": "yarn fix:js", "lint:deps": "knip --dependencies --no-progress", "fix:deps": "knip --dependencies --no-progress --fix", "prepack": "node ../../scripts/prepare-transloadit.ts", - "test:unit": "yarn --cwd ../.. tsc:utils && ../../node_modules/.bin/vitest run --coverage ./test/unit", - "test:e2e": "yarn --cwd ../.. tsc:utils && ../../node_modules/.bin/vitest run ./test/e2e", - "test": "yarn --cwd ../.. tsc:utils && ../../node_modules/.bin/vitest run --coverage" + "test:unit": "yarn --cwd ../.. tsc:utils && ../../node_modules/.bin/vitest run --coverage --passWithNoTests ./test/unit", + "test:e2e": "yarn --cwd ../.. tsc:utils && ../../node_modules/.bin/vitest run --passWithNoTests ./test/e2e", + "test": "yarn --cwd ../.. 
tsc:utils && ../../node_modules/.bin/vitest run --coverage --passWithNoTests" }, "license": "MIT", "main": "./dist/Transloadit.js", diff --git a/knip.ts b/knip.ts index 85ee3de1..aa823c6b 100644 --- a/knip.ts +++ b/knip.ts @@ -75,6 +75,7 @@ const config: KnipConfig = { 'got', 'into-stream', 'is-stream', + 'cacheable-lookup', 'node-watch', 'p-map', 'p-queue', diff --git a/packages/node/README.md b/packages/node/README.md index 1540e3f3..b48854c1 100644 --- a/packages/node/README.md +++ b/packages/node/README.md @@ -84,7 +84,880 @@ npx -y transloadit auth token --aud mcp --scope assemblies:write,templates:read ### Processing Media -Create Assemblies to process files using Assembly Instructions (steps) or Templates: +For common one-off tasks, prefer the intent-first commands: + +The full generated intent reference also lives in [`docs/intent-commands.md`](./docs/intent-commands.md). + + + +#### At a glance + +Intent commands are the fastest path to common one-off tasks from the CLI. +Use `--print-urls` when you want temporary result URLs without downloading locally. +All intent commands also support the global CLI flags `--json`, `--log-level`, `--endpoint`, and `--help`. 
+ +| Command | What it does | Input | Output | +| --- | --- | --- | --- | +| `image generate` | Generate images from text prompts | none | file | +| `preview generate` | Generate a preview thumbnail | file, dir, URL, base64 | file | +| `image remove-background` | Remove the background from images | file, dir, URL, base64 | file | +| `image optimize` | Optimize images without quality loss | file, dir, URL, base64 | file | +| `image resize` | Convert, resize, or watermark images | file, dir, URL, base64 | file | +| `document convert` | Convert documents into different formats | file, dir, URL, base64 | file | +| `document optimize` | Reduce PDF file size | file, dir, URL, base64 | file | +| `document auto-rotate` | Auto-rotate documents to the correct orientation | file, dir, URL, base64 | file | +| `document thumbs` | Extract thumbnail images from documents | file, dir, URL, base64 | directory | +| `audio waveform` | Generate waveform images from audio | file, dir, URL, base64 | file | +| `text speak` | Speak text | file, dir, URL, base64 | file | +| `video thumbs` | Extract thumbnails from videos | file, dir, URL, base64 | directory | +| `video encode-hls` | Run builtin/encode-hls-video@latest | file, dir, URL, base64 | directory | +| `image describe` | Describe images as labels or publishable text fields | file, dir, URL, base64 | file | +| `markdown pdf` | Render Markdown files as PDFs | file, dir, URL, base64 | file | +| `markdown docx` | Render Markdown files as DOCX documents | file, dir, URL, base64 | file | +| `file compress` | Compress files | file, dir, URL, base64 | file | +| `file decompress` | Decompress archives | file, dir, URL, base64 | directory | + +> At least one of `--out` or `--print-urls` is required on every intent command. + +#### Shared flags + +These flags are available across many intent commands, so the per-command sections below focus on differences. 
+ +**Shared file input & output flags** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--input, -i` | `path \| dir \| url \| -` | varies | `input.file` | Provide an input path, directory, URL, or - for stdin | +| `--input-base64` | `base64 \| data URL` | no | `data:text/plain;base64,SGVsbG8=` | Provide base64-encoded input content directly | +| `--out, -o` | `path` | yes* | `output.file` | Write the result to this path or directory | +| `--print-urls` | `boolean` | no | `false` | Print temporary result URLs after completion | + +**Shared no-input output flags** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--out, -o` | `path` | yes* | `output.file` | Write the result to this path | +| `--print-urls` | `boolean` | no | `false` | Print temporary result URLs after completion | + +**Shared processing flags** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--recursive, -r` | `boolean` | no | `false` | Enumerate input directories recursively | +| `--delete-after-processing, -d` | `boolean` | no | `false` | Delete input files after they are processed | +| `--reprocess-stale` | `boolean` | no | `false` | Process inputs even if output is newer | + +**Shared watch flags** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--watch, -w` | `boolean` | no | `false` | Watch inputs for changes | +| `--concurrency, -c` | `number` | no | `5` | Maximum number of concurrent assemblies (default: 5) | + +**Shared bundling flags** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--single-assembly` | `boolean` | no | `false` | Pass all input files to a single assembly instead of one assembly per file | + +#### `image generate` + +Generate images from text prompts + +Runs `/image/generate` and writes the result to `--out`. 
+ +**Usage** + +```bash +npx transloadit image generate [options] +``` + +**Quick facts** + +- Input: none +- Output: file +- Execution: no input +- Backend: `/image/generate` + +**Shared flags** + +- Uses the shared output flags listed above. + +**Command options** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--model` | `string` | no | `value` | The AI model to use for image generation. Defaults to google/nano-banana. | +| `--prompt` | `string` | yes | `"A red bicycle in a studio"` | The prompt describing the desired image content. | +| `--format` | `string` | no | `jpg` | Format of the generated image. | +| `--seed` | `number` | no | `1` | Seed for the random number generator. | +| `--aspect-ratio` | `string` | no | `value` | Aspect ratio of the generated image. | +| `--height` | `number` | no | `1` | Height of the generated image. | +| `--width` | `number` | no | `1` | Width of the generated image. | +| `--style` | `string` | no | `value` | Style of the generated image. | +| `--num-outputs` | `number` | no | `1` | Number of image variants to generate. | + +**Examples** + +```bash +transloadit image generate --prompt "A red bicycle in a studio" --out output.png +``` + +#### `preview generate` + +Generate a preview thumbnail + +Runs `/file/preview` on each input file and writes the result to `--out`. + +**Usage** + +```bash +npx transloadit preview generate --input [options] +``` + +**Quick facts** + +- Input: file, dir, URL, base64 +- Output: file +- Execution: per-file; supports `--single-assembly` and `--watch` +- Backend: `/file/preview` + +**Shared flags** + +- Uses the shared file input and output flags listed above. +- Also supports the shared base processing flags, watch flags, bundling flags listed above. + +**Command options** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--format` | `string` | no | `jpg` | The output format for the generated thumbnail image. 
If a short video clip is generated using the clip strategy, its format is defined by clip_format. | +| `--width` | `number` | no | `1` | Width of the thumbnail, in pixels. | +| `--height` | `number` | no | `1` | Height of the thumbnail, in pixels. | +| `--resize-strategy` | `string` | no | `crop` | To achieve the desired dimensions of the preview thumbnail, the Robot might have to resize the generated image. | +| `--background` | `string` | no | `value` | The hexadecimal code of the color used to fill the background (only used for the pad resize strategy). | +| `--strategy` | `json` | no | `value` | Definition of the thumbnail generation process per file category. | +| `--artwork-outer-color` | `string` | no | `value` | The color used in the outer parts of the artwork's gradient. | +| `--artwork-center-color` | `string` | no | `value` | The color used in the center of the artwork's gradient. | +| `--waveform-center-color` | `string` | no | `value` | The color used in the center of the waveform's gradient. The format is #rrggbb[aa] (red, green, blue, alpha). Only used if the waveform strategy for audio files is applied. | +| `--waveform-outer-color` | `string` | no | `value` | The color used in the outer parts of the waveform's gradient. The format is #rrggbb[aa] (red, green, blue, alpha). Only used if the waveform strategy for audio files is applied. | +| `--waveform-height` | `number` | no | `1` | Height of the waveform, in pixels. Only used if the waveform strategy for audio files is applied. It can be utilized to ensure that the waveform only takes up a section of the… | +| `--waveform-width` | `number` | no | `1` | Width of the waveform, in pixels. Only used if the waveform strategy for audio files is applied. It can be utilized to ensure that the waveform only takes up a section of the… | +| `--icon-style` | `string` | no | `square` | The style of the icon generated if the icon strategy is applied. 
| +| `--icon-text-color` | `string` | no | `value` | The color of the text used in the icon. The format is #rrggbb[aa]. Only used if the icon strategy is applied. | +| `--icon-text-font` | `string` | no | `value` | The font family of the text used in the icon. Only used if the icon strategy is applied. Here is a list of all supported fonts. | +| `--icon-text-content` | `string` | no | `extension` | The content of the text box in generated icons. Only used if the icon_style parameter is set to with-text. The default value, extension, adds the file extension (e.g. MP4, JPEG)… | +| `--optimize` | `boolean` | no | `true` | Specifies whether the generated preview image should be optimized to reduce the image's file size while keeping its quality. | +| `--optimize-priority` | `string` | no | `compression-ratio` | Specifies whether conversion speed or compression ratio is prioritized when optimizing images. | +| `--optimize-progressive` | `boolean` | no | `true` | Specifies whether images should be interlaced, which makes the result image load progressively in browsers. | +| `--clip-format` | `string` | no | `apng` | The animated image format for the generated video clip. Only used if the clip strategy for video files is applied. Please consult the MDN Web Docs for detailed information about… | +| `--clip-offset` | `number` | no | `1` | The start position in seconds of where the clip is cut. Only used if the clip strategy for video files is applied. Be aware that for larger video only the first few MBs of the… | +| `--clip-duration` | `number` | no | `1` | The duration in seconds of the generated video clip. Only used if the clip strategy for video files is applied. Be aware that a longer clip duration also results in a larger file… | +| `--clip-framerate` | `number` | no | `1` | The framerate of the generated video clip. Only used if the clip strategy for video files is applied.
Be aware that a higher framerate appears smoother but also results in a… | +| `--clip-loop` | `boolean` | no | `true` | Specifies whether the generated animated image should loop forever (true) or stop after playing the animation once (false). | + +**Examples** + +```bash +transloadit preview generate --input input.file --out output.file +``` + +#### `image remove-background` + +Remove the background from images + +Runs `/image/bgremove` on each input file and writes the result to `--out`. + +**Usage** + +```bash +npx transloadit image remove-background --input [options] +``` + +**Quick facts** + +- Input: file, dir, URL, base64 +- Output: file +- Execution: per-file; supports `--single-assembly` and `--watch` +- Backend: `/image/bgremove` + +**Shared flags** + +- Uses the shared file input and output flags listed above. +- Also supports the shared base processing flags, watch flags, bundling flags listed above. + +**Command options** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--select` | `string` | no | `foreground` | Region to select and keep in the image. The other region is removed. | +| `--format` | `string` | no | `png` | Format of the generated image. | +| `--provider` | `string` | no | `aws` | Provider to use for removing the background. | +| `--model` | `string` | no | `value` | Provider-specific model to use for removing the background. Mostly intended for testing and evaluation. | + +**Examples** + +```bash +transloadit image remove-background --input input.png --out output.png +``` + +#### `image optimize` + +Optimize images without quality loss + +Runs `/image/optimize` on each input file and writes the result to `--out`. 
+ +**Usage** + +```bash +npx transloadit image optimize --input [options] +``` + +**Quick facts** + +- Input: file, dir, URL, base64 +- Output: file +- Execution: per-file; supports `--single-assembly` and `--watch` +- Backend: `/image/optimize` + +**Shared flags** + +- Uses the shared file input and output flags listed above. +- Also supports the shared base processing flags, watch flags, bundling flags listed above. + +**Command options** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--priority` | `string` | no | `compression-ratio` | Provides different algorithms for better or worse compression for your images, but that run slower or faster. | +| `--progressive` | `boolean` | no | `true` | Interlaces the image if set to true, which makes the result image load progressively in browsers. | +| `--preserve-meta-data` | `boolean` | no | `true` | Specifies if the image's metadata should be preserved during the optimization, or not. | +| `--fix-breaking-images` | `boolean` | no | `true` | If set to true this parameter tries to fix images that would otherwise make the underlying tool error out and thereby break your Assemblies. | + +**Examples** + +```bash +transloadit image optimize --input input.png --out output.png +``` + +#### `image resize` + +Convert, resize, or watermark images + +Runs `/image/resize` on each input file and writes the result to `--out`. + +**Usage** + +```bash +npx transloadit image resize --input [options] +``` + +**Quick facts** + +- Input: file, dir, URL, base64 +- Output: file +- Execution: per-file; supports `--single-assembly` and `--watch` +- Backend: `/image/resize` + +**Shared flags** + +- Uses the shared file input and output flags listed above. +- Also supports the shared base processing flags, watch flags, bundling flags listed above.
+ +**Command options** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--format` | `string` | no | `value` | The output format for the modified image. Some of the most important available formats are "jpg", "png", "gif", and "tiff". For a complete list of all formats that we can write… | +| `--width` | `number` | no | `1` | Width of the result in pixels. If not specified, will default to the width of the original. | +| `--height` | `number` | no | `1` | Height of the new image, in pixels. If not specified, will default to the height of the input image. | +| `--resize-strategy` | `string` | no | `crop` | See the list of available resize strategies. | +| `--zoom` | `boolean` | no | `true` | If this is set to false, smaller images will not be stretched to the desired width and height. | +| `--crop` | `auto` | no | `value` | Specify an object containing coordinates for the top left and bottom right corners of the rectangle to be cropped from the original image(s). | +| `--gravity` | `string` | no | `bottom` | The direction from which the image is to be cropped, when "resize_strategy" is set to "crop", but no crop coordinates are defined. | +| `--strip` | `boolean` | no | `true` | Strips all metadata from the image. This is useful to keep thumbnails as small as possible. | +| `--alpha` | `string` | no | `Activate` | Gives control of the alpha/matte channel of an image. | +| `--preclip-alpha` | `string` | no | `Activate` | Gives control of the alpha/matte channel of an image before applying the clipping path via clip: true. | +| `--flatten` | `boolean` | no | `true` | Flattens all layers onto the specified background to achieve better results from transparent formats to non-transparent formats, as explained in the ImageMagick documentation. | +| `--correct-gamma` | `boolean` | no | `true` | Prevents gamma errors common in many image scaling algorithms.
| +| `--quality` | `number` | no | `1` | Controls the image compression for JPG and PNG images. Please also take a look at 🤖/image/optimize. | +| `--adaptive-filtering` | `boolean` | no | `true` | Controls the image compression for PNG images. Setting to true results in smaller file size, while increasing processing time. It is encouraged to keep this option disabled. | +| `--background` | `string` | no | `transparent` | Either the hexadecimal code or name of the color used to fill the background (used for the pad resize strategy). | +| `--frame` | `number` | no | `1` | Use this parameter when dealing with animated GIF files to specify which frame of the GIF is used for the operation. | +| `--colorspace` | `string` | no | `CMY` | Sets the image colorspace. For details about the available values, see the ImageMagick documentation. Please note that if you were using "RGB", we recommend using "sRGB" instead… | +| `--type` | `string` | no | `Bilevel` | Sets the image color type. For details about the available values, see the ImageMagick documentation. If you're using colorspace, ImageMagick might try to find the most efficient… | +| `--sepia` | `number` | no | `1` | Applies a sepia tone effect in percent. | +| `--rotation` | `auto` | no | `auto` | Determines whether the image should be rotated. Use any number to specify the rotation angle in degrees (e.g., 90, 180, 270, 360, or precise values like 2.9). Use the value true… | +| `--compress` | `string` | no | `BZip` | Specifies pixel compression for when the image is written. Compression is disabled by default. Please also take a look at 🤖/image/optimize. | +| `--blur` | `string` | no | `value` | Specifies gaussian blur, using a value with the form {radius}x{sigma}. | +| `--blur-regions` | `json` | no | `value` | Specifies an array of ellipse objects that should be blurred on the image. | +| `--brightness` | `number` | no | `1` | Increases or decreases the brightness of the image by using a multiplier. 
For example 1.5 would increase the brightness by 50%, and 0.75 would decrease the brightness by 25%. | +| `--saturation` | `number` | no | `1` | Increases or decreases the saturation of the image by using a multiplier. For example 1.5 would increase the saturation by 50%, and 0.75 would decrease the saturation by 25%. | +| `--hue` | `number` | no | `1` | Changes the hue by rotating the color of the image. The value 100 would produce no change whereas 0 and 200 will negate the colors in the image. | +| `--contrast` | `number` | no | `1` | Adjusts the contrast of the image. A value of 1 produces no change. Values below 1 decrease contrast (with 0 being minimum contrast), and values above 1 increase contrast (with 2… | +| `--watermark-url` | `string` | no | `value` | A URL indicating a PNG image to be overlaid above this image. | +| `--watermark-position` | `string[]` | no | `bottom` | The position at which the watermark is placed. The available options are "center", "top", "bottom", "left", and "right". You can also combine options, such as "bottom-right". An… | +| `--watermark-x-offset` | `number` | no | `1` | The x-offset in number of pixels at which the watermark will be placed in relation to the position it has due to watermark_position. | +| `--watermark-y-offset` | `number` | no | `1` | The y-offset in number of pixels at which the watermark will be placed in relation to the position it has due to watermark_position. | +| `--watermark-size` | `string` | no | `value` | The size of the watermark, as a percentage. For example, a value of "50%" means that size of the watermark will be 50% of the size of image on which it is placed. The exact… | +| `--watermark-resize-strategy` | `string` | no | `area` | Available values are "fit", "min_fit", "stretch" and "area". | +| `--watermark-opacity` | `number` | no | `1` | The opacity of the watermark, where 0.0 is fully transparent and 1.0 is fully opaque. 
| +| `--watermark-repeat-x` | `boolean` | no | `true` | When set to true, the watermark will be repeated horizontally across the entire width of the image. | +| `--watermark-repeat-y` | `boolean` | no | `true` | When set to true, the watermark will be repeated vertically across the entire height of the image. | +| `--text` | `json` | no | `value` | Text overlays to be applied to the image. Can be either a single text object or an array of text objects. Each text object contains text rules. The following text parameters are… | +| `--progressive` | `boolean` | no | `true` | Interlaces the image if set to true, which makes the image load progressively in browsers. | +| `--transparent` | `string` | no | `transparent` | Make this color transparent within the image. Example: "255,255,255". | +| `--trim-whitespace` | `boolean` | no | `true` | This determines if additional whitespace around the image should first be trimmed away. | +| `--clip` | `auto` | no | `value` | Apply the clipping path to other operations in the resize job, if one is present. | +| `--negate` | `boolean` | no | `true` | Replace each pixel with its complementary color, effectively negating the image. Especially useful when testing clipping. | +| `--density` | `string` | no | `value` | While in-memory quality and file format depth specifies the color resolution, the density of an image is the spatial (space) resolution of the image. | +| `--monochrome` | `boolean` | no | `true` | Transform the image to black and white. This is a shortcut for setting the colorspace to Gray and type to Bilevel. | +| `--shave` | `auto` | no | `value` | Shave pixels from the image edges. The value should be in the format width or widthxheight to specify the number of pixels to remove from each side. 
| + +**Examples** + +```bash +transloadit image resize --input input.png --out output.png +``` + +#### `document convert` + +Convert documents into different formats + +Runs `/document/convert` on each input file and writes the result to `--out`. + +**Usage** + +```bash +npx transloadit document convert --input [options] +``` + +**Quick facts** + +- Input: file, dir, URL, base64 +- Output: file +- Execution: per-file; supports `--single-assembly` and `--watch` +- Backend: `/document/convert` + +**Shared flags** + +- Uses the shared file input and output flags listed above. +- Also supports the shared base processing flags, watch flags, bundling flags listed above. + +**Command options** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--format` | `string` | yes | `pdf` | The desired format for document conversion. | +| `--markdown-format` | `string` | no | `commonmark` | Markdown can be represented in several variants, so when using this Robot to transform Markdown into HTML please specify which revision is being used. | +| `--markdown-theme` | `string` | no | `bare` | This parameter overhauls your Markdown files styling based on several canned presets. | +| `--pdf-margin` | `string` | no | `value` | PDF Paper margins, separated by , and with units. We support the following unit values: px, in, cm, mm. Currently this parameter is only supported when converting from html. | +| `--pdf-print-background` | `boolean` | no | `true` | Print PDF background graphics. Currently this parameter is only supported when converting from html. | +| `--pdf-format` | `string` | no | `A0` | PDF paper format. Currently this parameter is only supported when converting from html. | +| `--pdf-display-header-footer` | `boolean` | no | `true` | Display PDF header and footer. Currently this parameter is only supported when converting from html. | +| `--pdf-header-template` | `string` | no | `value` | HTML template for the PDF print header. 
Should be valid HTML markup with following classes used to inject printing values into them: - date formatted print date - title document… | +| `--pdf-footer-template` | `string` | no | `value` | HTML template for the PDF print footer. Should use the same format as the pdf_header_template. Currently this parameter is only supported when converting from html, and requires… | + +**Examples** + +```bash +transloadit document convert --input input.pdf --format pdf --out output.pdf +``` + +#### `document optimize` + +Reduce PDF file size + +Runs `/document/optimize` on each input file and writes the result to `--out`. + +**Usage** + +```bash +npx transloadit document optimize --input [options] +``` + +**Quick facts** + +- Input: file, dir, URL, base64 +- Output: file +- Execution: per-file; supports `--single-assembly` and `--watch` +- Backend: `/document/optimize` + +**Shared flags** + +- Uses the shared file input and output flags listed above. +- Also supports the shared base processing flags, watch flags, bundling flags listed above. + +**Command options** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--preset` | `string` | no | `screen` | The quality preset to use for optimization. Each preset provides a different balance between file size and quality: - screen - Lowest quality, smallest file size. Best for screen… | +| `--image-dpi` | `number` | no | `1` | Target DPI (dots per inch) for embedded images. When specified, this overrides the DPI setting from the preset. Higher DPI values result in better image quality but larger file… | +| `--compress-fonts` | `boolean` | no | `true` | Whether to compress embedded fonts. When enabled, fonts are compressed to reduce file size. | +| `--subset-fonts` | `boolean` | no | `true` | Whether to subset embedded fonts, keeping only the glyphs that are actually used in the document. 
| +| `--remove-metadata` | `boolean` | no | `true` | Whether to strip document metadata (title, author, keywords, etc.) from the PDF. This can provide a small reduction in file size and may be useful for privacy. | +| `--linearize` | `boolean` | no | `true` | Whether to linearize (optimize for Fast Web View) the output PDF. | +| `--compatibility` | `string` | no | `1.4` | The PDF version compatibility level. Lower versions have broader compatibility but fewer features. Higher versions support more advanced features but may not open in older PDF… | + +**Examples** + +```bash +transloadit document optimize --input input.pdf --out output.pdf +``` + +#### `document auto-rotate` + +Auto-rotate documents to the correct orientation + +Runs `/document/autorotate` on each input file and writes the result to `--out`. + +**Usage** + +```bash +npx transloadit document auto-rotate --input [options] +``` + +**Quick facts** + +- Input: file, dir, URL, base64 +- Output: file +- Execution: per-file; supports `--single-assembly` and `--watch` +- Backend: `/document/autorotate` + +**Shared flags** + +- Uses the shared file input and output flags listed above. +- Also supports the shared base processing flags, watch flags, bundling flags listed above. + +**Examples** + +```bash +transloadit document auto-rotate --input input.pdf --out output.pdf +``` + +#### `document thumbs` + +Extract thumbnail images from documents + +Runs `/document/thumbs` on each input file and writes the results to `--out`. + +**Usage** + +```bash +npx transloadit document thumbs --input [options] +``` + +**Quick facts** + +- Input: file, dir, URL, base64 +- Output: directory +- Execution: per-file; supports `--single-assembly` and `--watch` +- Backend: `/document/thumbs` + +**Shared flags** + +- Uses the shared file input and output flags listed above. +- Also supports the shared base processing flags, watch flags, bundling flags listed above. 
+ +**Command options** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--page` | `number` | no | `1` | The PDF page that you want to convert to an image. By default the value is null which means that all pages will be converted into images. | +| `--format` | `string` | no | `jpg` | The format of the extracted image(s). If you specify the value "gif", then an animated gif cycling through all pages is created. Please check out this demo to learn more about… | +| `--delay` | `number` | no | `1` | If your output format is "gif" then this parameter sets the number of 100th seconds to pass before the next frame is shown in the animation. | +| `--width` | `number` | no | `1` | Width of the new image, in pixels. If not specified, will default to the width of the input image | +| `--height` | `number` | no | `1` | Height of the new image, in pixels. If not specified, will default to the height of the input image | +| `--resize-strategy` | `string` | no | `crop` | One of the available resize strategies. | +| `--background` | `string` | no | `value` | Either the hexadecimal code or name of the color used to fill the background (only used for the pad resize strategy). | +| `--alpha` | `string` | no | `Remove` | Change how the alpha channel of the resulting image should work. | +| `--density` | `string` | no | `value` | While in-memory quality and file format depth specifies the color resolution, the density of an image is the spatial (space) resolution of the image. | +| `--antialiasing` | `boolean` | no | `true` | Controls whether or not antialiasing is used to remove jagged edges from text or images in a document. | +| `--colorspace` | `string` | no | `CMY` | Sets the image colorspace. For details about the available values, see the ImageMagick documentation. 
Please note that if you were using "RGB", we recommend using "sRGB".… | +| `--trim-whitespace` | `boolean` | no | `true` | This determines if additional whitespace around the PDF should first be trimmed away before it is converted to an image. | +| `--pdf-use-cropbox` | `boolean` | no | `true` | Some PDF documents lie about their dimensions. For instance they'll say they are landscape, but when opened in decent Desktop readers, it's really in portrait mode. This can… | +| `--turbo` | `boolean` | no | `true` | If you set this to false, the robot will not emit files as they become available. | + +**Examples** + +```bash +transloadit document thumbs --input input.pdf --out output/ +``` + +#### `audio waveform` + +Generate waveform images from audio + +Runs `/audio/waveform` on each input file and writes the result to `--out`. + +**Usage** + +```bash +npx transloadit audio waveform --input [options] +``` + +**Quick facts** + +- Input: file, dir, URL, base64 +- Output: file +- Execution: per-file; supports `--single-assembly` and `--watch` +- Backend: `/audio/waveform` + +**Shared flags** + +- Uses the shared file input and output flags listed above. +- Also supports the shared base processing flags, watch flags, bundling flags listed above. + +**Command options** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--ffmpeg` | `json` | no | `value` | A parameter object to be passed to FFmpeg. If a preset is used, the options specified are merged on top of the ones from the preset. For available options, see the FFmpeg… | +| `--format` | `string` | no | `image` | The format of the result file. Can be "image" or "json". If "image" is supplied, a PNG image will be created, otherwise a JSON file. | +| `--width` | `number` | no | `1` | The width of the resulting image if the format "image" was selected. | +| `--height` | `number` | no | `1` | The height of the resulting image if the format "image" was selected. 
| +| `--antialiasing` | `auto` | no | `0` | Either a value of 0 or 1, or true/false, corresponding to if you want to enable antialiasing to achieve smoother edges in the waveform graph or not. | +| `--background-color` | `string` | no | `value` | The background color of the resulting image in the "rrggbbaa" format (red, green, blue, alpha), if the format "image" was selected. | +| `--center-color` | `string` | no | `value` | The color used in the center of the gradient. The format is "rrggbbaa" (red, green, blue, alpha). | +| `--outer-color` | `string` | no | `value` | The color used in the outer parts of the gradient. The format is "rrggbbaa" (red, green, blue, alpha). | +| `--style` | `string` | no | `v0` | Waveform style version. - "v0": Legacy waveform generation (default). - "v1": Advanced waveform generation with additional parameters. For backwards compatibility, numeric values… | +| `--split-channels` | `boolean` | no | `true` | Available when style is "v1". If set to true, outputs multi-channel waveform data or image files, one per channel. | +| `--zoom` | `number` | no | `1` | Available when style is "v1". Zoom level in samples per pixel. This parameter cannot be used together with pixels_per_second. | +| `--pixels-per-second` | `number` | no | `1` | Available when style is "v1". Zoom level in pixels per second. This parameter cannot be used together with zoom. | +| `--bits` | `number` | no | `8` | Available when style is "v1". Bit depth for waveform data. Can be 8 or 16. | +| `--start` | `number` | no | `1` | Available when style is "v1". Start time in seconds. | +| `--end` | `number` | no | `1` | Available when style is "v1". End time in seconds (0 means end of audio). | +| `--colors` | `string` | no | `audition` | Available when style is "v1". Color scheme to use. Can be "audition" or "audacity". | +| `--border-color` | `string` | no | `value` | Available when style is "v1". Border color in "rrggbbaa" format. 
| +| `--waveform-style` | `string` | no | `normal` | Available when style is "v1". Waveform style. Can be "normal" or "bars". | +| `--bar-width` | `number` | no | `1` | Available when style is "v1". Width of bars in pixels when waveform_style is "bars". | +| `--bar-gap` | `number` | no | `1` | Available when style is "v1". Gap between bars in pixels when waveform_style is "bars". | +| `--bar-style` | `string` | no | `square` | Available when style is "v1". Bar style when waveform_style is "bars". | +| `--axis-label-color` | `string` | no | `value` | Available when style is "v1". Color for axis labels in "rrggbbaa" format. | +| `--no-axis-labels` | `boolean` | no | `true` | Available when style is "v1". If set to true, renders waveform image without axis labels. | +| `--with-axis-labels` | `boolean` | no | `true` | Available when style is "v1". If set to true, renders waveform image with axis labels. | +| `--amplitude-scale` | `number` | no | `1` | Available when style is "v1". Amplitude scale factor. | +| `--compression` | `number` | no | `1` | Available when style is "v1". PNG compression level: 0 (none) to 9 (best), or -1 (default). Only applicable when format is "image". | + +**Examples** + +```bash +transloadit audio waveform --input input.mp3 --out output.png +``` + +#### `text speak` + +Speak text + +Runs `/text/speak` on each input file and writes the result to `--out`. + +**Usage** + +```bash +npx transloadit text speak --input [options] +``` + +**Quick facts** + +- Input: file, dir, URL, base64 +- Output: file +- Execution: per-file; supports `--single-assembly` and `--watch` +- Backend: `/text/speak` + +**Shared flags** + +- Uses the shared file input and output flags listed above. +- Also supports the shared base processing flags, watch flags, bundling flags listed above. 
+ +**Command options** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--prompt` | `string` | no | `"A red bicycle in a studio"` | Which text to speak. You can also set this to null and supply an input text file. | +| `--provider` | `string` | yes | `aws` | Which AI provider to leverage. Transloadit outsources this task and abstracts the interface so you can expect the same data structures, but different latencies and information… | +| `--target-language` | `string` | no | `en-US` | The written language of the document. This will also be the language of the spoken text. The language should be specified in the BCP-47 format, such as "en-GB", "de-DE" or… | +| `--voice` | `string` | no | `female-1` | The gender to be used for voice synthesis. Please consult the list of supported languages and voices. | +| `--ssml` | `boolean` | no | `true` | Supply Speech Synthesis Markup Language instead of raw text, in order to gain more control over how your text is voiced, including rests and pronunciations. | + +**Examples** + +```bash +transloadit text speak --input input.pdf --provider aws --out output.mp3 +``` + +#### `video thumbs` + +Extract thumbnails from videos + +Runs `/video/thumbs` on each input file and writes the results to `--out`. + +**Usage** + +```bash +npx transloadit video thumbs --input [options] +``` + +**Quick facts** + +- Input: file, dir, URL, base64 +- Output: directory +- Execution: per-file; supports `--single-assembly` and `--watch` +- Backend: `/video/thumbs` + +**Shared flags** + +- Uses the shared file input and output flags listed above. +- Also supports the shared base processing flags, watch flags, bundling flags listed above. + +**Command options** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--ffmpeg` | `json` | no | `value` | A parameter object to be passed to FFmpeg. 
If a preset is used, the options specified are merged on top of the ones from the preset. For available options, see the FFmpeg… | +| `--count` | `number` | no | `1` | The number of thumbnails to be extracted. As some videos have incorrect durations, the actual number of thumbnails generated may be less in rare cases. The maximum number of… | +| `--offsets` | `auto` | no | `value` | An array of offsets representing seconds of the file duration, such as [ 2, 45, 120 ]. | +| `--format` | `string` | no | `jpg` | The format of the extracted thumbnail. Supported values are "jpg", "jpeg" and "png". Even if you specify the format to be "jpeg" the resulting thumbnails will have a "jpg" file… | +| `--width` | `number` | no | `1` | The width of the thumbnail, in pixels. Defaults to the original width of the video. | +| `--height` | `number` | no | `1` | The height of the thumbnail, in pixels. Defaults to the original height of the video. | +| `--resize-strategy` | `string` | no | `crop` | One of the available resize strategies. | +| `--background` | `string` | no | `value` | The background color of the resulting thumbnails in the "rrggbbaa" format (red, green, blue, alpha) when used with the "pad" resize strategy. The default color is black. | +| `--rotate` | `number` | no | `0` | Forces the video to be rotated by the specified degree integer. | +| `--input-codec` | `string` | no | `value` | Specifies the input codec to use when decoding the video. This is useful for videos with special codecs that require specific decoders. | + +**Examples** + +```bash +transloadit video thumbs --input input.mp4 --out output/ +``` + +#### `video encode-hls` + +Run builtin/encode-hls-video@latest + +Runs the `builtin/encode-hls-video@latest` template and writes the outputs to `--out`. 
+ +**Usage** + +```bash +npx transloadit video encode-hls --input [options] +``` + +**Quick facts** + +- Input: file, dir, URL, base64 +- Output: directory +- Execution: per-file; supports `--single-assembly` and `--watch` +- Backend: `builtin/encode-hls-video@latest` + +**Shared flags** + +- Uses the shared file input and output flags listed above. +- Also supports the shared base processing flags, watch flags, bundling flags listed above. + +**Examples** + +```bash +transloadit video encode-hls --input input.mp4 --out output/ +``` + +#### `image describe` + +Describe images as labels or publishable text fields + +Generates image labels through `/image/describe`, or structured altText/title/caption/description through `/ai/chat`, then writes the JSON result to `--out`. + +**Usage** + +```bash +npx transloadit image describe --input [options] +``` + +**Quick facts** + +- Input: file, dir, URL, base64 +- Output: file +- Execution: per-file; supports `--watch` +- Backend: semantic alias `image-describe` + +**Shared flags** + +- Uses the shared file input and output flags listed above. +- Also supports the shared base processing flags, watch flags listed above. 
+ +**Command options** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--fields` | `string[]` | no | — | Describe output fields to generate, for example labels or altText,title,caption,description | +| `--for` | `string` | no | — | Use a named output profile, currently: wordpress | +| `--model` | `string` | no | — | Model to use for generated text fields (default: anthropic/claude-4-sonnet-20250514) | + +**Examples** + +```bash +# Describe an image as labels +transloadit image describe --input hero.jpg --out labels.json +# Generate WordPress-ready fields +transloadit image describe --input hero.jpg --for wordpress --out fields.json +# Request a custom field set +transloadit image describe --input hero.jpg --fields altText,title,caption --out fields.json +``` + +#### `markdown pdf` + +Render Markdown files as PDFs + +Runs `/document/convert` with `format: pdf`, letting the backend render Markdown and preserve features such as internal heading links in the generated PDF. + +**Usage** + +```bash +npx transloadit markdown pdf --input [options] +``` + +**Quick facts** + +- Input: file, dir, URL, base64 +- Output: file +- Execution: per-file; supports `--watch` +- Backend: semantic alias `markdown-pdf` + +**Shared flags** + +- Uses the shared file input and output flags listed above. +- Also supports the shared base processing flags, watch flags listed above. 
+ +**Command options** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--markdown-format` | `string` | no | — | Markdown variant to parse, either commonmark or gfm | +| `--markdown-theme` | `string` | no | — | Markdown theme to render, either github or bare | + +**Examples** + +```bash +# Render a Markdown file as a PDF file +transloadit markdown pdf --input README.md --out README.pdf +# Print a temporary result URL without downloading locally +transloadit markdown pdf --input README.md --print-urls +``` + +#### `markdown docx` + +Render Markdown files as DOCX documents + +Runs `/document/convert` with `format: docx`, letting the backend render Markdown and convert it into a Word document. + +**Usage** + +```bash +npx transloadit markdown docx --input [options] +``` + +**Quick facts** + +- Input: file, dir, URL, base64 +- Output: file +- Execution: per-file; supports `--watch` +- Backend: semantic alias `markdown-docx` + +**Shared flags** + +- Uses the shared file input and output flags listed above. +- Also supports the shared base processing flags, watch flags listed above. + +**Command options** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--markdown-format` | `string` | no | — | Markdown variant to parse, either commonmark or gfm | +| `--markdown-theme` | `string` | no | — | Markdown theme to render, either github or bare | + +**Examples** + +```bash +# Render a Markdown file as a DOCX file +transloadit markdown docx --input README.md --out README.docx +# Print a temporary result URL without downloading locally +transloadit markdown docx --input README.md --print-urls +``` + +#### `file compress` + +Compress files + +Runs `/file/compress` for the provided inputs and writes the result to `--out`. 
+ +**Usage** + +```bash +npx transloadit file compress --input [options] +``` + +**Quick facts** + +- Input: file, dir, URL, base64 +- Output: file +- Execution: single assembly +- Backend: `/file/compress` + +**Shared flags** + +- Uses the shared file input and output flags listed above. +- Also supports the shared base processing flags listed above. + +**Command options** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--format` | `string` | no | `zip` | The format of the archive to be created. Supported values are "tar" and "zip". Note that "tar" without setting gzip to true results in an archive that's not compressed in any way. | +| `--gzip` | `boolean` | no | `true` | Determines if the result archive should also be gzipped. Gzip compression is only applied if you use the "tar" format. | +| `--password` | `string` | no | `value` | This allows you to encrypt all archive contents with a password and thereby protect it against unauthorized use. | +| `--compression-level` | `number` | no | `1` | Determines how fiercely to try to compress the archive. -0 is compressionless, which is suitable for media that is already compressed. -1 is fastest with lowest compression. -9… | +| `--file-layout` | `string` | no | `advanced` | Determines if the result archive should contain all files in one directory (value for this is "simple") or in subfolders according to the explanation below (value for this is… | +| `--archive-name` | `string` | no | `value` | The name of the archive file to be created (without the file extension). | + +**Examples** + +```bash +transloadit file compress --input input.file --out output.file +``` + +#### `file decompress` + +Decompress archives + +Runs `/file/decompress` on each input file and writes the results to `--out`. 
+ +**Usage** + +```bash +npx transloadit file decompress --input [options] +``` + +**Quick facts** + +- Input: file, dir, URL, base64 +- Output: directory +- Execution: per-file; supports `--single-assembly` and `--watch` +- Backend: `/file/decompress` + +**Shared flags** + +- Uses the shared file input and output flags listed above. +- Also supports the shared base processing flags, watch flags, bundling flags listed above. + +**Examples** + +```bash +transloadit file decompress --input input.file --out output/ +``` + + + +For full control, create Assemblies directly using Assembly Instructions (steps) or Templates: ```bash # Process a file using a steps file @@ -858,3 +1731,7 @@ Thanks to [Ian Hansen](https://github.com/supershabam) for donating the `translo ## Development See [CONTRIBUTING](./CONTRIBUTING.md). + + + + diff --git a/packages/node/docs/intent-commands.md b/packages/node/docs/intent-commands.md new file mode 100644 index 00000000..b40c81b9 --- /dev/null +++ b/packages/node/docs/intent-commands.md @@ -0,0 +1,868 @@ +# Intent Command Reference + +> Generated by `yarn workspace @transloadit/node sync:intent-docs`. Do not edit by hand. + +## At a glance + +Intent commands are the fastest path to common one-off tasks from the CLI. +Use `--print-urls` when you want temporary result URLs without downloading locally. +All intent commands also support the global CLI flags `--json`, `--log-level`, `--endpoint`, and `--help`. 
+ +| Command | What it does | Input | Output | +| --- | --- | --- | --- | +| `image generate` | Generate images from text prompts | none | file | +| `preview generate` | Generate a preview thumbnail | file, dir, URL, base64 | file | +| `image remove-background` | Remove the background from images | file, dir, URL, base64 | file | +| `image optimize` | Optimize images without quality loss | file, dir, URL, base64 | file | +| `image resize` | Convert, resize, or watermark images | file, dir, URL, base64 | file | +| `document convert` | Convert documents into different formats | file, dir, URL, base64 | file | +| `document optimize` | Reduce PDF file size | file, dir, URL, base64 | file | +| `document auto-rotate` | Auto-rotate documents to the correct orientation | file, dir, URL, base64 | file | +| `document thumbs` | Extract thumbnail images from documents | file, dir, URL, base64 | directory | +| `audio waveform` | Generate waveform images from audio | file, dir, URL, base64 | file | +| `text speak` | Speak text | file, dir, URL, base64 | file | +| `video thumbs` | Extract thumbnails from videos | file, dir, URL, base64 | directory | +| `video encode-hls` | Run builtin/encode-hls-video@latest | file, dir, URL, base64 | directory | +| `image describe` | Describe images as labels or publishable text fields | file, dir, URL, base64 | file | +| `markdown pdf` | Render Markdown files as PDFs | file, dir, URL, base64 | file | +| `markdown docx` | Render Markdown files as DOCX documents | file, dir, URL, base64 | file | +| `file compress` | Compress files | file, dir, URL, base64 | file | +| `file decompress` | Decompress archives | file, dir, URL, base64 | directory | + +> At least one of `--out` or `--print-urls` is required on every intent command. + +## Shared flags + +These flags are available across many intent commands, so the per-command sections below focus on differences. 
+ +**Shared file input & output flags** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--input, -i` | `path \| dir \| url \| -` | varies | `input.file` | Provide an input path, directory, URL, or - for stdin | +| `--input-base64` | `base64 \| data URL` | no | `data:text/plain;base64,SGVsbG8=` | Provide base64-encoded input content directly | +| `--out, -o` | `path` | yes* | `output.file` | Write the result to this path or directory | +| `--print-urls` | `boolean` | no | `false` | Print temporary result URLs after completion | + +**Shared no-input output flags** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--out, -o` | `path` | yes* | `output.file` | Write the result to this path | +| `--print-urls` | `boolean` | no | `false` | Print temporary result URLs after completion | + +**Shared processing flags** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--recursive, -r` | `boolean` | no | `false` | Enumerate input directories recursively | +| `--delete-after-processing, -d` | `boolean` | no | `false` | Delete input files after they are processed | +| `--reprocess-stale` | `boolean` | no | `false` | Process inputs even if output is newer | + +**Shared watch flags** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--watch, -w` | `boolean` | no | `false` | Watch inputs for changes | +| `--concurrency, -c` | `number` | no | `5` | Maximum number of concurrent assemblies (default: 5) | + +**Shared bundling flags** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--single-assembly` | `boolean` | no | `false` | Pass all input files to a single assembly instead of one assembly per file | + +## `image generate` + +Generate images from text prompts + +Runs `/image/generate` and writes the result to `--out`. 
+ +**Usage** + +```bash +npx transloadit image generate [options] +``` + +**Quick facts** + +- Input: none +- Output: file +- Execution: no input +- Backend: `/image/generate` + +**Shared flags** + +- Uses the shared output flags listed above. + +**Command options** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--model` | `string` | no | `value` | The AI model to use for image generation. Defaults to google/nano-banana. | +| `--prompt` | `string` | yes | `"A red bicycle in a studio"` | The prompt describing the desired image content. | +| `--format` | `string` | no | `jpg` | Format of the generated image. | +| `--seed` | `number` | no | `1` | Seed for the random number generator. | +| `--aspect-ratio` | `string` | no | `value` | Aspect ratio of the generated image. | +| `--height` | `number` | no | `1` | Height of the generated image. | +| `--width` | `number` | no | `1` | Width of the generated image. | +| `--style` | `string` | no | `value` | Style of the generated image. | +| `--num-outputs` | `number` | no | `1` | Number of image variants to generate. | + +**Examples** + +```bash +transloadit image generate --prompt "A red bicycle in a studio" --out output.png +``` + +## `preview generate` + +Generate a preview thumbnail + +Runs `/file/preview` on each input file and writes the result to `--out`. + +**Usage** + +```bash +npx transloadit preview generate --input [options] +``` + +**Quick facts** + +- Input: file, dir, URL, base64 +- Output: file +- Execution: per-file; supports `--single-assembly` and `--watch` +- Backend: `/file/preview` + +**Shared flags** + +- Uses the shared file input and output flags listed above. +- Also supports the shared base processing flags, watch flags, bundling flags listed above. + +**Command options** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--format` | `string` | no | `jpg` | The output format for the generated thumbnail image. 
If a short video clip is generated using the clip strategy, its format is defined by clip_format. | +| `--width` | `number` | no | `1` | Width of the thumbnail, in pixels. | +| `--height` | `number` | no | `1` | Height of the thumbnail, in pixels. | +| `--resize-strategy` | `string` | no | `crop` | To achieve the desired dimensions of the preview thumbnail, the Robot might have to resize the generated image. | +| `--background` | `string` | no | `value` | The hexadecimal code of the color used to fill the background (only used for the pad resize strategy). | +| `--strategy` | `json` | no | `value` | Definition of the thumbnail generation process per file category. | +| `--artwork-outer-color` | `string` | no | `value` | The color used in the outer parts of the artwork's gradient. | +| `--artwork-center-color` | `string` | no | `value` | The color used in the center of the artwork's gradient. | +| `--waveform-center-color` | `string` | no | `value` | The color used in the center of the waveform's gradient. The format is #rrggbb[aa] (red, green, blue, alpha). Only used if the waveform strategy for audio files is applied. | +| `--waveform-outer-color` | `string` | no | `value` | The color used in the outer parts of the waveform's gradient. The format is #rrggbb[aa] (red, green, blue, alpha). Only used if the waveform strategy for audio files is applied. | +| `--waveform-height` | `number` | no | `1` | Height of the waveform, in pixels. Only used if the waveform strategy for audio files is applied. It can be utilized to ensure that the waveform only takes up a section of the… | +| `--waveform-width` | `number` | no | `1` | Width of the waveform, in pixels. Only used if the waveform strategy for audio files is applied. It can be utilized to ensure that the waveform only takes up a section of the… | +| `--icon-style` | `string` | no | `square` | The style of the icon generated if the icon strategy is applied. 
| `--icon-text-color` | `string` | no | `value` | The color of the text used in the icon. The format is #rrggbb[aa]. Only used if the icon strategy is applied. | +| `--icon-text-font` | `string` | no | `value` | The font family of the text used in the icon. Only used if the icon strategy is applied. Here is a list of all supported fonts. | +| `--icon-text-content` | `string` | no | `extension` | The content of the text box in generated icons. Only used if the icon_style parameter is set to with-text. The default value, extension, adds the file extension (e.g. MP4, JPEG)… | +| `--optimize` | `boolean` | no | `true` | Specifies whether the generated preview image should be optimized to reduce the image's file size while keeping their quality. | +| `--optimize-priority` | `string` | no | `compression-ratio` | Specifies whether conversion speed or compression ratio is prioritized when optimizing images. | +| `--optimize-progressive` | `boolean` | no | `true` | Specifies whether images should be interlaced, which makes the result image load progressively in browsers. | +| `--clip-format` | `string` | no | `apng` | The animated image format for the generated video clip. Only used if the clip strategy for video files is applied. Please consult the MDN Web Docs for detailed information about… | +| `--clip-offset` | `number` | no | `1` | The start position in seconds of where the clip is cut. Only used if the clip strategy for video files is applied. Be aware that for larger video only the first few MBs of the… | +| `--clip-duration` | `number` | no | `1` | The duration in seconds of the generated video clip. Only used if the clip strategy for video files is applied. Be aware that a longer clip duration also results in a larger file… | +| `--clip-framerate` | `number` | no | `1` | The framerate of the generated video clip. Only used if the clip strategy for video files is applied. 
Be aware that a higher framerate appears smoother but also results in a… | +| `--clip-loop` | `boolean` | no | `true` | Specifies whether the generated animated image should loop forever (true) or stop after playing the animation once (false). | + +**Examples** + +```bash +transloadit preview generate --input input.file --out output.file +``` + +## `image remove-background` + +Remove the background from images + +Runs `/image/bgremove` on each input file and writes the result to `--out`. + +**Usage** + +```bash +npx transloadit image remove-background --input [options] +``` + +**Quick facts** + +- Input: file, dir, URL, base64 +- Output: file +- Execution: per-file; supports `--single-assembly` and `--watch` +- Backend: `/image/bgremove` + +**Shared flags** + +- Uses the shared file input and output flags listed above. +- Also supports the shared base processing flags, watch flags, bundling flags listed above. + +**Command options** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--select` | `string` | no | `foreground` | Region to select and keep in the image. The other region is removed. | +| `--format` | `string` | no | `png` | Format of the generated image. | +| `--provider` | `string` | no | `aws` | Provider to use for removing the background. | +| `--model` | `string` | no | `value` | Provider-specific model to use for removing the background. Mostly intended for testing and evaluation. | + +**Examples** + +```bash +transloadit image remove-background --input input.png --out output.png +``` + +## `image optimize` + +Optimize images without quality loss + +Runs `/image/optimize` on each input file and writes the result to `--out`. 
+ +**Usage** + +```bash +npx transloadit image optimize --input [options] +``` + +**Quick facts** + +- Input: file, dir, URL, base64 +- Output: file +- Execution: per-file; supports `--single-assembly` and `--watch` +- Backend: `/image/optimize` + +**Shared flags** + +- Uses the shared file input and output flags listed above. +- Also supports the shared base processing flags, watch flags, bundling flags listed above. + +**Command options** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--priority` | `string` | no | `compression-ratio` | Provides different algorithms for better or worse compression for your images, but that run slower or faster. | +| `--progressive` | `boolean` | no | `true` | Interlaces the image if set to true, which makes the result image load progressively in browsers. | +| `--preserve-meta-data` | `boolean` | no | `true` | Specifies if the image's metadata should be preserved during the optimization, or not. | +| `--fix-breaking-images` | `boolean` | no | `true` | If set to true this parameter tries to fix images that would otherwise make the underlying tool error out and thereby break your Assemblies . | + +**Examples** + +```bash +transloadit image optimize --input input.png --out output.png +``` + +## `image resize` + +Convert, resize, or watermark images + +Runs `/image/resize` on each input file and writes the result to `--out`. + +**Usage** + +```bash +npx transloadit image resize --input [options] +``` + +**Quick facts** + +- Input: file, dir, URL, base64 +- Output: file +- Execution: per-file; supports `--single-assembly` and `--watch` +- Backend: `/image/resize` + +**Shared flags** + +- Uses the shared file input and output flags listed above. +- Also supports the shared base processing flags, watch flags, bundling flags listed above. 
+ +**Command options** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--format` | `string` | no | `value` | The output format for the modified image. Some of the most important available formats are "jpg", "png", "gif", and "tiff". For a complete list of all formats that we can write… | +| `--width` | `number` | no | `1` | Width of the result in pixels. If not specified, will default to the width of the original. | +| `--height` | `number` | no | `1` | Height of the new image, in pixels. If not specified, will default to the height of the input image. | +| `--resize-strategy` | `string` | no | `crop` | See the list of available resize strategies. | +| `--zoom` | `boolean` | no | `true` | If this is set to false, smaller images will not be stretched to the desired width and height. | +| `--crop` | `auto` | no | `value` | Specify an object containing coordinates for the top left and bottom right corners of the rectangle to be cropped from the original image(s). | +| `--gravity` | `string` | no | `bottom` | The direction from which the image is to be cropped, when "resize_strategy" is set to "crop", but no crop coordinates are defined. | +| `--strip` | `boolean` | no | `true` | Strips all metadata from the image. This is useful to keep thumbnails as small as possible. | +| `--alpha` | `string` | no | `Activate` | Gives control of the alpha/matte channel of an image. | +| `--preclip-alpha` | `string` | no | `Activate` | Gives control of the alpha/matte channel of an image before applying the clipping path via clip: true. | +| `--flatten` | `boolean` | no | `true` | Flattens all layers onto the specified background to achieve better results from transparent formats to non-transparent formats, as explained in the ImageMagick documentation. | +| `--correct-gamma` | `boolean` | no | `true` | Prevents gamma errors common in many image scaling algorithms. 
| +| `--quality` | `number` | no | `1` | Controls the image compression for JPG and PNG images. Please also take a look at 🤖/image/optimize. | +| `--adaptive-filtering` | `boolean` | no | `true` | Controls the image compression for PNG images. Setting to true results in smaller file size, while increasing processing time. It is encouraged to keep this option disabled. | +| `--background` | `string` | no | `transparent` | Either the hexadecimal code or name of the color used to fill the background (used for the pad resize strategy). | +| `--frame` | `number` | no | `1` | Use this parameter when dealing with animated GIF files to specify which frame of the GIF is used for the operation. | +| `--colorspace` | `string` | no | `CMY` | Sets the image colorspace. For details about the available values, see the ImageMagick documentation. Please note that if you were using "RGB", we recommend using "sRGB" instead… | +| `--type` | `string` | no | `Bilevel` | Sets the image color type. For details about the available values, see the ImageMagick documentation. If you're using colorspace, ImageMagick might try to find the most efficient… | +| `--sepia` | `number` | no | `1` | Applies a sepia tone effect in percent. | +| `--rotation` | `auto` | no | `auto` | Determines whether the image should be rotated. Use any number to specify the rotation angle in degrees (e.g., 90, 180, 270, 360, or precise values like 2.9). Use the value true… | +| `--compress` | `string` | no | `BZip` | Specifies pixel compression for when the image is written. Compression is disabled by default. Please also take a look at 🤖/image/optimize. | +| `--blur` | `string` | no | `value` | Specifies gaussian blur, using a value with the form {radius}x{sigma}. | +| `--blur-regions` | `json` | no | `value` | Specifies an array of ellipse objects that should be blurred on the image. | +| `--brightness` | `number` | no | `1` | Increases or decreases the brightness of the image by using a multiplier. 
For example 1.5 would increase the brightness by 50%, and 0.75 would decrease the brightness by 25%. | +| `--saturation` | `number` | no | `1` | Increases or decreases the saturation of the image by using a multiplier. For example 1.5 would increase the saturation by 50%, and 0.75 would decrease the saturation by 25%. | +| `--hue` | `number` | no | `1` | Changes the hue by rotating the color of the image. The value 100 would produce no change whereas 0 and 200 will negate the colors in the image. | +| `--contrast` | `number` | no | `1` | Adjusts the contrast of the image. A value of 1 produces no change. Values below 1 decrease contrast (with 0 being minimum contrast), and values above 1 increase contrast (with 2… | +| `--watermark-url` | `string` | no | `value` | A URL indicating a PNG image to be overlaid above this image. | +| `--watermark-position` | `string[]` | no | `bottom` | The position at which the watermark is placed. The available options are "center", "top", "bottom", "left", and "right". You can also combine options, such as "bottom-right". An… | +| `--watermark-x-offset` | `number` | no | `1` | The x-offset in number of pixels at which the watermark will be placed in relation to the position it has due to watermark_position. | +| `--watermark-y-offset` | `number` | no | `1` | The y-offset in number of pixels at which the watermark will be placed in relation to the position it has due to watermark_position. | +| `--watermark-size` | `string` | no | `value` | The size of the watermark, as a percentage. For example, a value of "50%" means that size of the watermark will be 50% of the size of image on which it is placed. The exact… | +| `--watermark-resize-strategy` | `string` | no | `area` | Available values are "fit", "min_fit", "stretch" and "area". | +| `--watermark-opacity` | `number` | no | `1` | The opacity of the watermark, where 0.0 is fully transparent and 1.0 is fully opaque. 
| +| `--watermark-repeat-x` | `boolean` | no | `true` | When set to true, the watermark will be repeated horizontally across the entire width of the image. | +| `--watermark-repeat-y` | `boolean` | no | `true` | When set to true, the watermark will be repeated vertically across the entire height of the image. | +| `--text` | `json` | no | `value` | Text overlays to be applied to the image. Can be either a single text object or an array of text objects. Each text object contains text rules. The following text parameters are… | +| `--progressive` | `boolean` | no | `true` | Interlaces the image if set to true, which makes the image load progressively in browsers. | +| `--transparent` | `string` | no | `transparent` | Make this color transparent within the image. Example: "255,255,255". | +| `--trim-whitespace` | `boolean` | no | `true` | This determines if additional whitespace around the image should first be trimmed away. | +| `--clip` | `auto` | no | `value` | Apply the clipping path to other operations in the resize job, if one is present. | +| `--negate` | `boolean` | no | `true` | Replace each pixel with its complementary color, effectively negating the image. Especially useful when testing clipping. | +| `--density` | `string` | no | `value` | While in-memory quality and file format depth specifies the color resolution, the density of an image is the spatial (space) resolution of the image. | +| `--monochrome` | `boolean` | no | `true` | Transform the image to black and white. This is a shortcut for setting the colorspace to Gray and type to Bilevel. | +| `--shave` | `auto` | no | `value` | Shave pixels from the image edges. The value should be in the format width or widthxheight to specify the number of pixels to remove from each side. 
| + +**Examples** + +```bash +transloadit image resize --input input.png --out output.png +``` + +## `document convert` + +Convert documents into different formats + +Runs `/document/convert` on each input file and writes the result to `--out`. + +**Usage** + +```bash +npx transloadit document convert --input [options] +``` + +**Quick facts** + +- Input: file, dir, URL, base64 +- Output: file +- Execution: per-file; supports `--single-assembly` and `--watch` +- Backend: `/document/convert` + +**Shared flags** + +- Uses the shared file input and output flags listed above. +- Also supports the shared base processing flags, watch flags, bundling flags listed above. + +**Command options** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--format` | `string` | yes | `pdf` | The desired format for document conversion. | +| `--markdown-format` | `string` | no | `commonmark` | Markdown can be represented in several variants, so when using this Robot to transform Markdown into HTML please specify which revision is being used. | +| `--markdown-theme` | `string` | no | `bare` | This parameter overhauls your Markdown files styling based on several canned presets. | +| `--pdf-margin` | `string` | no | `value` | PDF Paper margins, separated by , and with units. We support the following unit values: px, in, cm, mm. Currently this parameter is only supported when converting from html. | +| `--pdf-print-background` | `boolean` | no | `true` | Print PDF background graphics. Currently this parameter is only supported when converting from html. | +| `--pdf-format` | `string` | no | `A0` | PDF paper format. Currently this parameter is only supported when converting from html. | +| `--pdf-display-header-footer` | `boolean` | no | `true` | Display PDF header and footer. Currently this parameter is only supported when converting from html. | +| `--pdf-header-template` | `string` | no | `value` | HTML template for the PDF print header. 
Should be valid HTML markup with following classes used to inject printing values into them: - date formatted print date - title document… | +| `--pdf-footer-template` | `string` | no | `value` | HTML template for the PDF print footer. Should use the same format as the pdf_header_template. Currently this parameter is only supported when converting from html, and requires… | + +**Examples** + +```bash +transloadit document convert --input input.pdf --format pdf --out output.pdf +``` + +## `document optimize` + +Reduce PDF file size + +Runs `/document/optimize` on each input file and writes the result to `--out`. + +**Usage** + +```bash +npx transloadit document optimize --input [options] +``` + +**Quick facts** + +- Input: file, dir, URL, base64 +- Output: file +- Execution: per-file; supports `--single-assembly` and `--watch` +- Backend: `/document/optimize` + +**Shared flags** + +- Uses the shared file input and output flags listed above. +- Also supports the shared base processing flags, watch flags, bundling flags listed above. + +**Command options** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--preset` | `string` | no | `screen` | The quality preset to use for optimization. Each preset provides a different balance between file size and quality: - screen - Lowest quality, smallest file size. Best for screen… | +| `--image-dpi` | `number` | no | `1` | Target DPI (dots per inch) for embedded images. When specified, this overrides the DPI setting from the preset. Higher DPI values result in better image quality but larger file… | +| `--compress-fonts` | `boolean` | no | `true` | Whether to compress embedded fonts. When enabled, fonts are compressed to reduce file size. | +| `--subset-fonts` | `boolean` | no | `true` | Whether to subset embedded fonts, keeping only the glyphs that are actually used in the document. 
| +| `--remove-metadata` | `boolean` | no | `true` | Whether to strip document metadata (title, author, keywords, etc.) from the PDF. This can provide a small reduction in file size and may be useful for privacy. | +| `--linearize` | `boolean` | no | `true` | Whether to linearize (optimize for Fast Web View) the output PDF. | +| `--compatibility` | `string` | no | `1.4` | The PDF version compatibility level. Lower versions have broader compatibility but fewer features. Higher versions support more advanced features but may not open in older PDF… | + +**Examples** + +```bash +transloadit document optimize --input input.pdf --out output.pdf +``` + +## `document auto-rotate` + +Auto-rotate documents to the correct orientation + +Runs `/document/autorotate` on each input file and writes the result to `--out`. + +**Usage** + +```bash +npx transloadit document auto-rotate --input [options] +``` + +**Quick facts** + +- Input: file, dir, URL, base64 +- Output: file +- Execution: per-file; supports `--single-assembly` and `--watch` +- Backend: `/document/autorotate` + +**Shared flags** + +- Uses the shared file input and output flags listed above. +- Also supports the shared base processing flags, watch flags, bundling flags listed above. + +**Examples** + +```bash +transloadit document auto-rotate --input input.pdf --out output.pdf +``` + +## `document thumbs` + +Extract thumbnail images from documents + +Runs `/document/thumbs` on each input file and writes the results to `--out`. + +**Usage** + +```bash +npx transloadit document thumbs --input [options] +``` + +**Quick facts** + +- Input: file, dir, URL, base64 +- Output: directory +- Execution: per-file; supports `--single-assembly` and `--watch` +- Backend: `/document/thumbs` + +**Shared flags** + +- Uses the shared file input and output flags listed above. +- Also supports the shared base processing flags, watch flags, bundling flags listed above. 
+ +**Command options** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--page` | `number` | no | `1` | The PDF page that you want to convert to an image. By default the value is null which means that all pages will be converted into images. | +| `--format` | `string` | no | `jpg` | The format of the extracted image(s). If you specify the value "gif", then an animated gif cycling through all pages is created. Please check out this demo to learn more about… | +| `--delay` | `number` | no | `1` | If your output format is "gif" then this parameter sets the number of 100th seconds to pass before the next frame is shown in the animation. | +| `--width` | `number` | no | `1` | Width of the new image, in pixels. If not specified, will default to the width of the input image | +| `--height` | `number` | no | `1` | Height of the new image, in pixels. If not specified, will default to the height of the input image | +| `--resize-strategy` | `string` | no | `crop` | One of the available resize strategies. | +| `--background` | `string` | no | `value` | Either the hexadecimal code or name of the color used to fill the background (only used for the pad resize strategy). | +| `--alpha` | `string` | no | `Remove` | Change how the alpha channel of the resulting image should work. | +| `--density` | `string` | no | `value` | While in-memory quality and file format depth specifies the color resolution, the density of an image is the spatial (space) resolution of the image. | +| `--antialiasing` | `boolean` | no | `true` | Controls whether or not antialiasing is used to remove jagged edges from text or images in a document. | +| `--colorspace` | `string` | no | `CMY` | Sets the image colorspace. For details about the available values, see the ImageMagick documentation. 
Please note that if you were using "RGB", we recommend using "sRGB".… | +| `--trim-whitespace` | `boolean` | no | `true` | This determines if additional whitespace around the PDF should first be trimmed away before it is converted to an image. | +| `--pdf-use-cropbox` | `boolean` | no | `true` | Some PDF documents lie about their dimensions. For instance they'll say they are landscape, but when opened in decent Desktop readers, it's really in portrait mode. This can… | +| `--turbo` | `boolean` | no | `true` | If you set this to false, the robot will not emit files as they become available. | + +**Examples** + +```bash +transloadit document thumbs --input input.pdf --out output/ +``` + +## `audio waveform` + +Generate waveform images from audio + +Runs `/audio/waveform` on each input file and writes the result to `--out`. + +**Usage** + +```bash +npx transloadit audio waveform --input [options] +``` + +**Quick facts** + +- Input: file, dir, URL, base64 +- Output: file +- Execution: per-file; supports `--single-assembly` and `--watch` +- Backend: `/audio/waveform` + +**Shared flags** + +- Uses the shared file input and output flags listed above. +- Also supports the shared base processing flags, watch flags, bundling flags listed above. + +**Command options** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--ffmpeg` | `json` | no | `value` | A parameter object to be passed to FFmpeg. If a preset is used, the options specified are merged on top of the ones from the preset. For available options, see the FFmpeg… | +| `--format` | `string` | no | `image` | The format of the result file. Can be "image" or "json". If "image" is supplied, a PNG image will be created, otherwise a JSON file. | +| `--width` | `number` | no | `1` | The width of the resulting image if the format "image" was selected. | +| `--height` | `number` | no | `1` | The height of the resulting image if the format "image" was selected. 
| +| `--antialiasing` | `auto` | no | `0` | Either a value of 0 or 1, or true/false, corresponding to if you want to enable antialiasing to achieve smoother edges in the waveform graph or not. | +| `--background-color` | `string` | no | `value` | The background color of the resulting image in the "rrggbbaa" format (red, green, blue, alpha), if the format "image" was selected. | +| `--center-color` | `string` | no | `value` | The color used in the center of the gradient. The format is "rrggbbaa" (red, green, blue, alpha). | +| `--outer-color` | `string` | no | `value` | The color used in the outer parts of the gradient. The format is "rrggbbaa" (red, green, blue, alpha). | +| `--style` | `string` | no | `v0` | Waveform style version. - "v0": Legacy waveform generation (default). - "v1": Advanced waveform generation with additional parameters. For backwards compatibility, numeric values… | +| `--split-channels` | `boolean` | no | `true` | Available when style is "v1". If set to true, outputs multi-channel waveform data or image files, one per channel. | +| `--zoom` | `number` | no | `1` | Available when style is "v1". Zoom level in samples per pixel. This parameter cannot be used together with pixels_per_second. | +| `--pixels-per-second` | `number` | no | `1` | Available when style is "v1". Zoom level in pixels per second. This parameter cannot be used together with zoom. | +| `--bits` | `number` | no | `8` | Available when style is "v1". Bit depth for waveform data. Can be 8 or 16. | +| `--start` | `number` | no | `1` | Available when style is "v1". Start time in seconds. | +| `--end` | `number` | no | `1` | Available when style is "v1". End time in seconds (0 means end of audio). | +| `--colors` | `string` | no | `audition` | Available when style is "v1". Color scheme to use. Can be "audition" or "audacity". | +| `--border-color` | `string` | no | `value` | Available when style is "v1". Border color in "rrggbbaa" format. 
| +| `--waveform-style` | `string` | no | `normal` | Available when style is "v1". Waveform style. Can be "normal" or "bars". | +| `--bar-width` | `number` | no | `1` | Available when style is "v1". Width of bars in pixels when waveform_style is "bars". | +| `--bar-gap` | `number` | no | `1` | Available when style is "v1". Gap between bars in pixels when waveform_style is "bars". | +| `--bar-style` | `string` | no | `square` | Available when style is "v1". Bar style when waveform_style is "bars". | +| `--axis-label-color` | `string` | no | `value` | Available when style is "v1". Color for axis labels in "rrggbbaa" format. | +| `--no-axis-labels` | `boolean` | no | `true` | Available when style is "v1". If set to true, renders waveform image without axis labels. | +| `--with-axis-labels` | `boolean` | no | `true` | Available when style is "v1". If set to true, renders waveform image with axis labels. | +| `--amplitude-scale` | `number` | no | `1` | Available when style is "v1". Amplitude scale factor. | +| `--compression` | `number` | no | `1` | Available when style is "v1". PNG compression level: 0 (none) to 9 (best), or -1 (default). Only applicable when format is "image". | + +**Examples** + +```bash +transloadit audio waveform --input input.mp3 --out output.png +``` + +## `text speak` + +Speak text + +Runs `/text/speak` on each input file and writes the result to `--out`. + +**Usage** + +```bash +npx transloadit text speak --input [options] +``` + +**Quick facts** + +- Input: file, dir, URL, base64 +- Output: file +- Execution: per-file; supports `--single-assembly` and `--watch` +- Backend: `/text/speak` + +**Shared flags** + +- Uses the shared file input and output flags listed above. +- Also supports the shared base processing flags, watch flags, bundling flags listed above. 
+ +**Command options** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--prompt` | `string` | no | `"A red bicycle in a studio"` | Which text to speak. You can also set this to null and supply an input text file. | +| `--provider` | `string` | yes | `aws` | Which AI provider to leverage. Transloadit outsources this task and abstracts the interface so you can expect the same data structures, but different latencies and information… | +| `--target-language` | `string` | no | `en-US` | The written language of the document. This will also be the language of the spoken text. The language should be specified in the BCP-47 format, such as "en-GB", "de-DE" or… | +| `--voice` | `string` | no | `female-1` | The gender to be used for voice synthesis. Please consult the list of supported languages and voices. | +| `--ssml` | `boolean` | no | `true` | Supply Speech Synthesis Markup Language instead of raw text, in order to gain more control over how your text is voiced, including rests and pronunciations. | + +**Examples** + +```bash +transloadit text speak --input input.pdf --provider aws --out output.mp3 +``` + +## `video thumbs` + +Extract thumbnails from videos + +Runs `/video/thumbs` on each input file and writes the results to `--out`. + +**Usage** + +```bash +npx transloadit video thumbs --input [options] +``` + +**Quick facts** + +- Input: file, dir, URL, base64 +- Output: directory +- Execution: per-file; supports `--single-assembly` and `--watch` +- Backend: `/video/thumbs` + +**Shared flags** + +- Uses the shared file input and output flags listed above. +- Also supports the shared base processing flags, watch flags, bundling flags listed above. + +**Command options** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--ffmpeg` | `json` | no | `value` | A parameter object to be passed to FFmpeg. If a preset is used, the options specified are merged on top of the ones from the preset. 
For available options, see the FFmpeg… | +| `--count` | `number` | no | `1` | The number of thumbnails to be extracted. As some videos have incorrect durations, the actual number of thumbnails generated may be less in rare cases. The maximum number of… | +| `--offsets` | `auto` | no | `value` | An array of offsets representing seconds of the file duration, such as [ 2, 45, 120 ]. | +| `--format` | `string` | no | `jpg` | The format of the extracted thumbnail. Supported values are "jpg", "jpeg" and "png". Even if you specify the format to be "jpeg" the resulting thumbnails will have a "jpg" file… | +| `--width` | `number` | no | `1` | The width of the thumbnail, in pixels. Defaults to the original width of the video. | +| `--height` | `number` | no | `1` | The height of the thumbnail, in pixels. Defaults to the original height of the video. | +| `--resize-strategy` | `string` | no | `crop` | One of the available resize strategies. | +| `--background` | `string` | no | `value` | The background color of the resulting thumbnails in the "rrggbbaa" format (red, green, blue, alpha) when used with the "pad" resize strategy. The default color is black. | +| `--rotate` | `number` | no | `0` | Forces the video to be rotated by the specified degree integer. | +| `--input-codec` | `string` | no | `value` | Specifies the input codec to use when decoding the video. This is useful for videos with special codecs that require specific decoders. | + +**Examples** + +```bash +transloadit video thumbs --input input.mp4 --out output/ +``` + +## `video encode-hls` + +Run builtin/encode-hls-video@latest + +Runs the `builtin/encode-hls-video@latest` template and writes the outputs to `--out`. 
+ +**Usage** + +```bash +npx transloadit video encode-hls --input [options] +``` + +**Quick facts** + +- Input: file, dir, URL, base64 +- Output: directory +- Execution: per-file; supports `--single-assembly` and `--watch` +- Backend: `builtin/encode-hls-video@latest` + +**Shared flags** + +- Uses the shared file input and output flags listed above. +- Also supports the shared base processing flags, watch flags, bundling flags listed above. + +**Examples** + +```bash +transloadit video encode-hls --input input.mp4 --out output/ +``` + +## `image describe` + +Describe images as labels or publishable text fields + +Generates image labels through `/image/describe`, or structured altText/title/caption/description through `/ai/chat`, then writes the JSON result to `--out`. + +**Usage** + +```bash +npx transloadit image describe --input [options] +``` + +**Quick facts** + +- Input: file, dir, URL, base64 +- Output: file +- Execution: per-file; supports `--watch` +- Backend: semantic alias `image-describe` + +**Shared flags** + +- Uses the shared file input and output flags listed above. +- Also supports the shared base processing flags, watch flags listed above. 
+ +**Command options** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--fields` | `string[]` | no | — | Describe output fields to generate, for example labels or altText,title,caption,description | +| `--for` | `string` | no | — | Use a named output profile, currently: wordpress | +| `--model` | `string` | no | — | Model to use for generated text fields (default: anthropic/claude-4-sonnet-20250514) | + +**Examples** + +```bash +# Describe an image as labels +transloadit image describe --input hero.jpg --out labels.json +# Generate WordPress-ready fields +transloadit image describe --input hero.jpg --for wordpress --out fields.json +# Request a custom field set +transloadit image describe --input hero.jpg --fields altText,title,caption --out fields.json +``` + +## `markdown pdf` + +Render Markdown files as PDFs + +Runs `/document/convert` with `format: pdf`, letting the backend render Markdown and preserve features such as internal heading links in the generated PDF. + +**Usage** + +```bash +npx transloadit markdown pdf --input [options] +``` + +**Quick facts** + +- Input: file, dir, URL, base64 +- Output: file +- Execution: per-file; supports `--watch` +- Backend: semantic alias `markdown-pdf` + +**Shared flags** + +- Uses the shared file input and output flags listed above. +- Also supports the shared base processing flags, watch flags listed above. 
+ +**Command options** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--markdown-format` | `string` | no | — | Markdown variant to parse, either commonmark or gfm | +| `--markdown-theme` | `string` | no | — | Markdown theme to render, either github or bare | + +**Examples** + +```bash +# Render a Markdown file as a PDF file +transloadit markdown pdf --input README.md --out README.pdf +# Print a temporary result URL without downloading locally +transloadit markdown pdf --input README.md --print-urls +``` + +## `markdown docx` + +Render Markdown files as DOCX documents + +Runs `/document/convert` with `format: docx`, letting the backend render Markdown and convert it into a Word document. + +**Usage** + +```bash +npx transloadit markdown docx --input [options] +``` + +**Quick facts** + +- Input: file, dir, URL, base64 +- Output: file +- Execution: per-file; supports `--watch` +- Backend: semantic alias `markdown-docx` + +**Shared flags** + +- Uses the shared file input and output flags listed above. +- Also supports the shared base processing flags, watch flags listed above. + +**Command options** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--markdown-format` | `string` | no | — | Markdown variant to parse, either commonmark or gfm | +| `--markdown-theme` | `string` | no | — | Markdown theme to render, either github or bare | + +**Examples** + +```bash +# Render a Markdown file as a DOCX file +transloadit markdown docx --input README.md --out README.docx +# Print a temporary result URL without downloading locally +transloadit markdown docx --input README.md --print-urls +``` + +## `file compress` + +Compress files + +Runs `/file/compress` for the provided inputs and writes the result to `--out`. 
+ +**Usage** + +```bash +npx transloadit file compress --input [options] +``` + +**Quick facts** + +- Input: file, dir, URL, base64 +- Output: file +- Execution: single assembly +- Backend: `/file/compress` + +**Shared flags** + +- Uses the shared file input and output flags listed above. +- Also supports the shared base processing flags listed above. + +**Command options** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--format` | `string` | no | `zip` | The format of the archive to be created. Supported values are "tar" and "zip". Note that "tar" without setting gzip to true results in an archive that's not compressed in any way. | +| `--gzip` | `boolean` | no | `true` | Determines if the result archive should also be gzipped. Gzip compression is only applied if you use the "tar" format. | +| `--password` | `string` | no | `value` | This allows you to encrypt all archive contents with a password and thereby protect it against unauthorized use. | +| `--compression-level` | `number` | no | `1` | Determines how fiercely to try to compress the archive. -0 is compressionless, which is suitable for media that is already compressed. -1 is fastest with lowest compression. -9… | +| `--file-layout` | `string` | no | `advanced` | Determines if the result archive should contain all files in one directory (value for this is "simple") or in subfolders according to the explanation below (value for this is… | +| `--archive-name` | `string` | no | `value` | The name of the archive file to be created (without the file extension). | + +**Examples** + +```bash +transloadit file compress --input input.file --out output.file +``` + +## `file decompress` + +Decompress archives + +Runs `/file/decompress` on each input file and writes the results to `--out`. 
+ +**Usage** + +```bash +npx transloadit file decompress --input [options] +``` + +**Quick facts** + +- Input: file, dir, URL, base64 +- Output: directory +- Execution: per-file; supports `--single-assembly` and `--watch` +- Backend: `/file/decompress` + +**Shared flags** + +- Uses the shared file input and output flags listed above. +- Also supports the shared base processing flags, watch flags, bundling flags listed above. + +**Examples** + +```bash +transloadit file decompress --input input.file --out output/ +``` diff --git a/packages/node/package.json b/packages/node/package.json index b2da1b2c..1e21f827 100644 --- a/packages/node/package.json +++ b/packages/node/package.json @@ -36,6 +36,7 @@ "@aws-sdk/s3-request-presigner": "^3.891.0", "@transloadit/sev-logger": "^0.1.9", "@transloadit/utils": "^4.3.0", + "cacheable-lookup": "^7.0.0", "clipanion": "^4.0.0-rc.4", "debug": "^4.4.3", "dotenv": "^17.2.3", @@ -82,13 +83,14 @@ "src": "./src" }, "scripts": { - "check": "yarn lint:ts && yarn fix && yarn test:unit", + "check": "yarn sync:intent-docs && yarn lint:ts && yarn fix && yarn test:unit", + "sync:intent-docs": "node src/cli/generateIntentDocs.ts", "fix:js": "biome check --write .", "lint:ts": "yarn --cwd ../.. tsc:node", "fix:js:unsafe": "biome check --write . --unsafe", "lint:js": "biome check .", - "lint": "npm-run-all --parallel 'lint:js'", - "fix": "npm-run-all --serial 'fix:js'", + "lint": "yarn lint:js", + "fix": "yarn fix:js", "lint:deps": "knip --dependencies --no-progress", "fix:deps": "knip --dependencies --no-progress --fix", "prepack": "node -e \"require('node:fs').rmSync('dist',{recursive:true,force:true})\" && rm -f tsconfig.tsbuildinfo tsconfig.build.tsbuildinfo && yarn --cwd ../.. 
tsc:node", diff --git a/packages/node/scripts/test-intents-e2e.sh b/packages/node/scripts/test-intents-e2e.sh new file mode 100755 index 00000000..5099ea96 --- /dev/null +++ b/packages/node/scripts/test-intents-e2e.sh @@ -0,0 +1,304 @@ +#!/usr/bin/env bash + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +REPO_ROOT="$(cd "$SCRIPT_DIR/../../.." && pwd)" +WORKDIR="${1:-/tmp/node-sdk-intent-e2e}" +OUTDIR="$WORKDIR/out" +LOGDIR="$WORKDIR/logs" +FIXTUREDIR="$WORKDIR/fixtures" +CLI=(node "$REPO_ROOT/packages/node/src/cli.ts") +PREVIEW_URL='https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf' + +if [[ -f "$REPO_ROOT/.env" ]]; then + set -a + # shellcheck disable=SC1090 + source "$REPO_ROOT/.env" + set +a +fi + +if [[ -z "${TRANSLOADIT_KEY:-}" || -z "${TRANSLOADIT_SECRET:-}" ]]; then + echo "Missing TRANSLOADIT_KEY / TRANSLOADIT_SECRET. Expected them in $REPO_ROOT/.env or the environment." >&2 + exit 1 +fi + +require_command() { + local command_name="$1" + if ! command -v "$command_name" >/dev/null 2>&1; then + echo "Missing required command: $command_name" >&2 + exit 1 + fi +} + +prepare_fixtures() { + require_command curl + require_command ffmpeg + require_command zip + + rm -rf "$WORKDIR" + mkdir -p "$OUTDIR" "$LOGDIR" "$FIXTUREDIR" + + cp "$REPO_ROOT/packages/node/examples/fixtures/berkley.jpg" "$FIXTUREDIR/input.jpg" + cp "$REPO_ROOT/packages/node/test/e2e/fixtures/testsrc.mp4" "$FIXTUREDIR/input.mp4" + printf 'Hello from Transloadit CLI intents\n' >"$FIXTUREDIR/input.txt" + cat >"$FIXTUREDIR/input.md" <<'EOF' +# CLI Intents + +This is a **Markdown** fixture. 
+ +## Features + +- headings render +- lists render +- emphasis renders +EOF + zip -j "$FIXTUREDIR/input.zip" "$FIXTUREDIR/input.txt" >/dev/null + ffmpeg -f lavfi -i sine=frequency=1000:duration=1 -q:a 9 -acodec libmp3lame -y "$FIXTUREDIR/input.mp3" >/dev/null 2>&1 + curl -L --fail --silent --show-error -o "$FIXTUREDIR/input.pdf" "$PREVIEW_URL" +} + +verify_file_type() { + local path="$1" + local expected="$2" + + [[ -s "$path" ]] || return 1 + file "$path" | grep -F "$expected" >/dev/null +} + +verify_png() { + verify_file_type "$1" 'PNG image data' +} + +verify_jpeg() { + verify_file_type "$1" 'JPEG image data' +} + +verify_pdf() { + verify_file_type "$1" 'PDF document' +} + +verify_docx() { + verify_file_type "$1" 'Microsoft OOXML' +} + +verify_mp3() { + verify_file_type "$1" 'Audio file' +} + +verify_zip() { + verify_file_type "$1" 'Zip archive data' +} + +verify_document_thumbs() { + local first_png + first_png="$(find "$1" -maxdepth 1 -type f -name '*.png' | sort | head -n 1)" + [[ -n "$first_png" ]] || return 1 + verify_png "$first_png" +} + +verify_video_thumbs() { + local first_jpeg + first_jpeg="$(find "$1" -maxdepth 1 -type f -name '*.jpg' | sort | head -n 1)" + [[ -n "$first_jpeg" ]] || return 1 + verify_jpeg "$first_jpeg" +} + +verify_video_encode_hls() { + [[ -f "$1/high/input.mp4" ]] || return 1 + [[ -f "$1/low/input.mp4" ]] || return 1 + [[ -f "$1/mid/input.mp4" ]] || return 1 + [[ -f "$1/adaptive/my_playlist.m3u8" ]] || return 1 +} + +verify_file_decompress() { + [[ -f "$1/input.txt" ]] || return 1 + grep -F 'Hello from Transloadit CLI intents' "$1/input.txt" >/dev/null +} + +verify_json() { + node --input-type=module <<'NODE' "$1" +import { readFileSync } from 'node:fs' + +const value = JSON.parse(readFileSync(process.argv[1], 'utf8')) +const ok = + value != null && + (!Array.isArray(value) || value.length > 0) && + (typeof value !== 'object' || Object.keys(value).length > 0) + +process.exit(ok ? 
0 : 1) +NODE +} + +verify_image_describe_labels() { + node --input-type=module <<'NODE' "$1" +import { readFileSync } from 'node:fs' + +const value = JSON.parse(readFileSync(process.argv[1], 'utf8')) +const ok = + Array.isArray(value) && + value.length > 0 && + value.every((item) => typeof item === 'string' || (item && typeof item.name === 'string')) + +process.exit(ok ? 0 : 1) +NODE +} + +verify_image_describe_wordpress() { + node --input-type=module <<'NODE' "$1" +import { readFileSync } from 'node:fs' + +const value = JSON.parse(readFileSync(process.argv[1], 'utf8')) +const required = ['altText', 'title', 'caption', 'description'] +const ok = + value && + typeof value === 'object' && + required.every((key) => typeof value[key] === 'string' && value[key].trim().length > 0) + +process.exit(ok ? 0 : 1) +NODE +} + +verify_output() { + local verifier="$1" + local path="$2" + + case "$verifier" in + json) verify_json "$path" ;; + png) verify_png "$path" ;; + jpeg) verify_jpeg "$path" ;; + pdf) verify_pdf "$path" ;; + docx) verify_docx "$path" ;; + mp3) verify_mp3 "$path" ;; + zip) verify_zip "$path" ;; + document-thumbs) verify_document_thumbs "$path" ;; + video-thumbs) verify_video_thumbs "$path" ;; + video-encode-hls) verify_video_encode_hls "$path" ;; + file-decompress) verify_file_decompress "$path" ;; + image-describe-labels) verify_image_describe_labels "$path" ;; + image-describe-wordpress) verify_image_describe_wordpress "$path" ;; + *) + echo "Unknown verifier: $verifier" >&2 + return 1 + ;; + esac +} + +resolve_placeholder() { + local arg="$1" + + case "$arg" in + @preview-url) printf '%s\n' "$PREVIEW_URL" ;; + @fixture/*) printf '%s\n' "$FIXTUREDIR/${arg#@fixture/}" ;; + *) printf '%s\n' "$arg" ;; + esac +} + +run_case() { + local name="$1" + local output_path="$2" + local verifier="$3" + shift 3 + + local logfile="$LOGDIR/${name}.log" + rm -rf "$output_path" + mkdir -p "$(dirname "$output_path")" + + set +e + "${CLI[@]}" "$@" >"$logfile" 2>&1 + local 
exit_code=$? + set -e + + local verdict='FAIL' + local detail='' + + if [[ $exit_code -eq 0 ]] && verify_output "$verifier" "$output_path"; then + verdict='OK' + if [[ -f "$output_path" ]]; then + detail="$(file "$output_path" | sed 's#^.*: ##' | tr '\n' ' ' | awk '{$1=$1; print}')" + else + detail="$(find "$output_path" -type f | sed "s#^$output_path/##" | sort | tr '\n' ',' | sed 's/,$//')" + fi + else + if [[ -s "$logfile" ]]; then + detail="$(tail -n 8 "$logfile" | tr '\n' ' ' | awk '{$1=$1; print}' | cut -c1-220)" + else + detail='No output captured' + fi + fi + + printf '%s\t%s\t%s\t%s\n' "$name" "$exit_code" "$verdict" "$detail" +} + +prepare_fixtures + +RESULTS_TSV="$WORKDIR/results.tsv" +printf 'command\texit\tverdict\tdetail\n' >"$RESULTS_TSV" + +while IFS=$'\t' read -r name path_string args_string output_rel verifier; do + [[ -n "$name" ]] || continue + + read -r -a path_parts <<<"$path_string" + IFS=$'\x1f' read -r -a raw_args <<<"$args_string" + + resolved_args=() + for arg in "${raw_args[@]}"; do + resolved_args+=("$(resolve_placeholder "$arg")") + done + + run_case "$name" "$OUTDIR/$output_rel" "$verifier" \ + "${path_parts[@]}" \ + "${resolved_args[@]}" \ + --out "$OUTDIR/$output_rel" \ + >>"$RESULTS_TSV" +done < <( + node --input-type=module <<'NODE' +import { intentSmokeCases } from './packages/node/test/support/intentSmokeCases.ts' + +for (const smokeCase of intentSmokeCases) { + console.log([ + smokeCase.paths.join('-'), + smokeCase.paths.join(' '), + smokeCase.args.join('\x1f'), + smokeCase.outputPath, + smokeCase.verifier, + ].join('\t')) +} + +for (const smokeCase of [ + { + name: 'image-describe-labels', + paths: ['image', 'describe'], + args: ['--input', '@fixture/input.jpg', '--fields', 'labels'], + outputPath: 'image-describe-labels.json', + verifier: 'image-describe-labels', + }, + { + name: 'image-describe-wordpress', + paths: ['image', 'describe'], + args: ['--input', '@fixture/input.jpg', '--for', 'wordpress'], + outputPath: 
'image-describe-wordpress.json', + verifier: 'image-describe-wordpress', + }, +]) { + console.log([ + smokeCase.name, + smokeCase.paths.join(' '), + smokeCase.args.join('\x1f'), + smokeCase.outputPath, + smokeCase.verifier, + ].join('\t')) +} +NODE +) + +column -t -s $'\t' "$RESULTS_TSV" + +if awk -F '\t' 'NR > 1 && $3 != "OK" { exit 1 }' "$RESULTS_TSV"; then + echo + echo "All intent commands passed. Fixtures, outputs, and logs are in $WORKDIR" +else + echo + echo "One or more intent commands failed. Inspect $LOGDIR for details." >&2 + exit 1 +fi diff --git a/packages/node/src/Transloadit.ts b/packages/node/src/Transloadit.ts index 5878b93a..18ad3ef8 100644 --- a/packages/node/src/Transloadit.ts +++ b/packages/node/src/Transloadit.ts @@ -68,12 +68,11 @@ export { TimeoutError, UploadError, } from 'got' -export type { AssemblyStatus } from './alphalib/types/assemblyStatus.ts' -export * from './apiTypes.ts' -export { InconsistentResponseError, ApiError } export { extractFieldNamesFromTemplate } from './alphalib/stepParsing.ts' // Builtin templates replace the legacy golden template helpers. 
export { mergeTemplateContent } from './alphalib/templateMerge.ts' +export type { AssemblyStatus } from './alphalib/types/assemblyStatus.ts' +export * from './apiTypes.ts' export type { Base64Strategy, InputFile, @@ -93,6 +92,7 @@ export type { RobotParamHelp, } from './robots.ts' export { getRobotHelp, isKnownRobot, listRobots } from './robots.ts' +export { ApiError, InconsistentResponseError } const log = debug('transloadit') const logWarn = debug('transloadit:warn') diff --git a/packages/node/src/alphalib/types/robots/ai-chat.ts b/packages/node/src/alphalib/types/robots/ai-chat.ts index af2bc783..7a92b061 100644 --- a/packages/node/src/alphalib/types/robots/ai-chat.ts +++ b/packages/node/src/alphalib/types/robots/ai-chat.ts @@ -148,6 +148,7 @@ export const meta: RobotMetaInput = { export const MODEL_CAPABILITIES: Record = { 'anthropic/claude-4-sonnet-20250514': { pdf: true, image: true }, 'anthropic/claude-4-opus-20250514': { pdf: true, image: true }, + 'anthropic/claude-sonnet-4-6': { pdf: true, image: true }, 'anthropic/claude-sonnet-4-5': { pdf: true, image: true }, 'anthropic/claude-opus-4-5': { pdf: true, image: true }, 'anthropic/claude-opus-4-6': { pdf: true, image: true }, diff --git a/packages/node/src/cli.ts b/packages/node/src/cli.ts index bdcd0b93..d7eedc1e 100644 --- a/packages/node/src/cli.ts +++ b/packages/node/src/cli.ts @@ -6,6 +6,7 @@ import process from 'node:process' import { fileURLToPath } from 'node:url' import 'dotenv/config' import { createCli } from './cli/commands/index.ts' +import { ensureError } from './cli/types.ts' const currentFile = realpathSync(fileURLToPath(import.meta.url)) @@ -32,13 +33,13 @@ export async function main(args = process.argv.slice(2)): Promise { } } -export function runCliWhenExecuted(): void { +export async function runCliWhenExecuted(): Promise { if (!shouldRunCli(process.argv[1])) return - void main().catch((error) => { - console.error((error as Error).message) + await main().catch((error) => { + 
console.error(ensureError(error).message) process.exitCode = 1 }) } -runCliWhenExecuted() +await runCliWhenExecuted() diff --git a/packages/node/src/cli/commands/assemblies.ts b/packages/node/src/cli/commands/assemblies.ts index a3def35b..7e28e9cb 100644 --- a/packages/node/src/cli/commands/assemblies.ts +++ b/packages/node/src/cli/commands/assemblies.ts @@ -1,12 +1,13 @@ +import { randomUUID } from 'node:crypto' import EventEmitter from 'node:events' import fs from 'node:fs' import fsp from 'node:fs/promises' import path from 'node:path' import process from 'node:process' -import type { Readable, Writable } from 'node:stream' +import type { Readable } from 'node:stream' +import { Writable } from 'node:stream' import { pipeline } from 'node:stream/promises' import { setTimeout as delay } from 'node:timers/promises' -import tty from 'node:tty' import { promisify } from 'node:util' import { Command, Option } from 'clipanion' import got from 'got' @@ -15,15 +16,31 @@ import * as t from 'typanion' import { z } from 'zod' import { formatLintIssue } from '../../alphalib/assembly-linter.lang.en.ts' import { tryCatch } from '../../alphalib/tryCatch.ts' -import type { Steps, StepsInput } from '../../alphalib/types/template.ts' -import { stepsSchema } from '../../alphalib/types/template.ts' +import type { StepsInput } from '../../alphalib/types/template.ts' import type { CreateAssemblyParams, ReplayAssemblyParams } from '../../apiTypes.ts' +import { ensureUniqueCounterValue } from '../../ensureUniqueCounter.ts' import type { LintFatalLevel } from '../../lintAssemblyInstructions.ts' import { lintAssemblyInstructions } from '../../lintAssemblyInstructions.ts' import type { CreateAssemblyOptions, Transloadit } from '../../Transloadit.ts' import { lintingExamples } from '../docs/assemblyLintingExamples.ts' -import { createReadStream, formatAPIError, readCliInput, streamToBuffer } from '../helpers.ts' +import { + concurrencyOption, + deleteAfterProcessingOption, + 
inputPathsOption, + printUrlsOption, + recursiveOption, + reprocessStaleOption, + singleAssemblyOption, + validateSharedFileProcessingOptions, + watchOption, +} from '../fileProcessingOptions.ts' +import { formatAPIError, readCliInput } from '../helpers.ts' import type { IOutputCtl } from '../OutputCtl.ts' +import type { NormalizedAssemblyResultFile, NormalizedAssemblyResults } from '../resultFiles.ts' +import { normalizeAssemblyResults } from '../resultFiles.ts' +import type { ResultUrlRow } from '../resultUrls.ts' +import { collectNormalizedResultUrlRows, printResultUrls } from '../resultUrls.ts' +import { readStepsInputFile } from '../stepsInput.ts' import { ensureError, isErrnoException } from '../types.ts' import { AuthenticatedCommand, UnauthenticatedCommand } from './BaseCommand.ts' @@ -61,6 +78,30 @@ export interface AssemblyLintOptions { json?: boolean } +function parseTemplateFieldAssignments( + output: IOutputCtl, + fields: string[] | undefined, +): Record | undefined { + if (fields == null || fields.length === 0) { + return undefined + } + + const fieldsMap: Record = {} + for (const field of fields) { + const eqIndex = field.indexOf('=') + if (eqIndex === -1) { + output.error(`invalid argument for --field: '${field}'`) + return undefined + } + + const key = field.slice(0, eqIndex) + const value = field.slice(eqIndex + 1) + fieldsMap[key] = value + } + + return fieldsMap +} + const AssemblySchema = z.object({ id: z.string(), }) @@ -148,13 +189,7 @@ export async function replay( ): Promise { if (steps) { try { - const buf = await streamToBuffer(createReadStream(steps)) - const parsed: unknown = JSON.parse(buf.toString()) - const validated = stepsSchema.safeParse(parsed) - if (!validated.success) { - throw new Error(`Invalid steps format: ${validated.error.message}`) - } - await apiCall(validated.data) + await apiCall(await readStepsInputFile(steps)) } catch (err) { const error = ensureError(err) output.error(error.message) @@ -163,14 +198,13 @@ export 
async function replay( await apiCall() } - async function apiCall(stepsOverride?: Steps): Promise { + async function apiCall(stepsOverride?: StepsInput): Promise { const promises = assemblies.map(async (assembly) => { const [err] = await tryCatch( client.replayAssembly(assembly, { reparse_template: reparse ? 1 : 0, fields, notify_url, - // Steps (validated) is assignable to StepsInput at runtime; cast for TS steps: stepsOverride as ReplayAssemblyParams['steps'], }), ) @@ -298,49 +332,44 @@ async function getNodeWatch(): Promise { const stdinWithPath = process.stdin as unknown as { path: string } stdinWithPath.path = '/dev/stdin' -interface OutStream extends Writable { +interface OutputPlan { + mtime: Date path?: string - mtime?: Date } interface Job { - in: Readable | null - out: OutStream | null + inputPath: string | null + out: OutputPlan | null + watchEvent?: boolean } -type OutstreamProvider = (inpath: string | null, indir?: string) => Promise - -interface StreamRegistry { - [key: string]: OutStream | undefined -} +type OutputPlanProvider = (inpath: string | null, indir?: string) => Promise interface JobEmitterOptions { + allowOutputCollisions?: boolean recursive?: boolean - outstreamProvider: OutstreamProvider - streamRegistry: StreamRegistry + outputPlanProvider: OutputPlanProvider + singleAssembly?: boolean watch?: boolean reprocessStale?: boolean } interface ReaddirJobEmitterOptions { dir: string - streamRegistry: StreamRegistry recursive?: boolean - outstreamProvider: OutstreamProvider + outputPlanProvider: OutputPlanProvider topdir?: string } interface SingleJobEmitterOptions { file: string - streamRegistry: StreamRegistry - outstreamProvider: OutstreamProvider + outputPlanProvider: OutputPlanProvider } interface WatchJobEmitterOptions { file: string - streamRegistry: StreamRegistry recursive?: boolean - outstreamProvider: OutstreamProvider + outputPlanProvider: OutputPlanProvider } interface StatLike { @@ -360,12 +389,49 @@ async function myStat( return 
await fsp.stat(filepath) } -function dirProvider(output: string): OutstreamProvider { +function getJobInputPath(filepath: string): string { + const normalizedFile = path.normalize(filepath) + if (normalizedFile === '-') { + return stdinWithPath.path + } + + return normalizedFile +} + +function createInputUploadStream(filepath: string): Readable { + const instream = fs.createReadStream(filepath) + // Attach a no-op error handler to prevent unhandled errors if stream is destroyed + // before being consumed (e.g., due to output collision detection) + instream.on('error', () => {}) + return instream +} + +function createOutputPlan(pathname: string | undefined, mtime: Date): OutputPlan { + if (pathname == null) { + return { + mtime, + } + } + + return { + mtime, + path: pathname, + } +} + +async function createExistingPathOutputPlan(outputPath: string | undefined): Promise { + if (outputPath == null) { + return createOutputPlan(undefined, new Date(0)) + } + + const [, stats] = await tryCatch(fsp.stat(outputPath)) + return createOutputPlan(outputPath, stats?.mtime ?? new Date(0)) +} + +function dirProvider(output: string): OutputPlanProvider { return async (inpath, indir = process.cwd()) => { - // Inputless assemblies can still write into a directory, but output paths are derived from - // assembly results rather than an input file path (handled later). if (inpath == null) { - return null + return await createExistingPathOutputPlan(output) } if (inpath === '-') { throw new Error('You must provide an input to output to a directory') @@ -374,41 +440,372 @@ function dirProvider(output: string): OutstreamProvider { let relpath = path.relative(indir, inpath) relpath = relpath.replace(/^(\.\.\/)+/, '') const outpath = path.join(output, relpath) - const outdir = path.dirname(outpath) - - await fsp.mkdir(outdir, { recursive: true }) - const [, stats] = await tryCatch(fsp.stat(outpath)) - const mtime = stats?.mtime ?? 
new Date(0) - const outstream = fs.createWriteStream(outpath) as OutStream - // Attach a no-op error handler to prevent unhandled errors if stream is destroyed - // before being consumed (e.g., due to output collision detection) - outstream.on('error', () => {}) - outstream.mtime = mtime - return outstream + return await createExistingPathOutputPlan(outpath) } } -function fileProvider(output: string): OutstreamProvider { - const dirExistsP = fsp.mkdir(path.dirname(output), { recursive: true }) +function fileProvider(output: string): OutputPlanProvider { return async (_inpath) => { - await dirExistsP - if (output === '-') return process.stdout as OutStream - - const [, stats] = await tryCatch(fsp.stat(output)) - const mtime = stats?.mtime ?? new Date(0) - const outstream = fs.createWriteStream(output) as OutStream - // Attach a no-op error handler to prevent unhandled errors if stream is destroyed - // before being consumed (e.g., due to output collision detection) - outstream.on('error', () => {}) - outstream.mtime = mtime - return outstream + if (output === '-') { + return await createExistingPathOutputPlan(undefined) + } + + return await createExistingPathOutputPlan(output) } } -function nullProvider(): OutstreamProvider { +function nullProvider(): OutputPlanProvider { return async (_inpath) => null } +async function downloadResultToFile( + resultUrl: string, + outPath: string, + signal: AbortSignal, +): Promise { + await fsp.mkdir(path.dirname(outPath), { recursive: true }) + + const tempPath = path.join( + path.dirname(outPath), + `.${path.basename(outPath)}.${randomUUID()}.tmp`, + ) + const outStream = fs.createWriteStream(tempPath) + outStream.on('error', () => {}) + + const [dlErr] = await tryCatch(pipeline(got.stream(resultUrl, { signal }), outStream)) + if (dlErr) { + await fsp.rm(tempPath, { force: true }) + throw dlErr + } + + await fsp.rename(tempPath, outPath) +} + +async function downloadResultToStdout(resultUrl: string, signal: AbortSignal): Promise 
{ + const stdoutStream = new Writable({ + write(chunk, _encoding, callback) { + let settled = false + + const finish = (err?: Error | null) => { + if (settled) return + settled = true + process.stdout.off('drain', onDrain) + process.stdout.off('error', onError) + callback(err ?? undefined) + } + + const onDrain = () => finish() + const onError = (err: Error) => finish(err) + + process.stdout.once('error', onError) + + try { + if (process.stdout.write(chunk)) { + finish() + return + } + + process.stdout.once('drain', onDrain) + } catch (err) { + finish(ensureError(err)) + } + }, + final(callback) { + callback() + }, + }) + + await pipeline(got.stream(resultUrl, { signal }), stdoutStream) +} + +function sanitizeResultName(value: string): string { + const base = path.basename(value) + return base.replaceAll('\\', '_').replaceAll('/', '_').replaceAll('\u0000', '') +} + +async function ensureUniquePath(targetPath: string, reservedPaths: Set): Promise { + const parsed = path.parse(targetPath) + return await ensureUniqueCounterValue({ + initialValue: targetPath, + isTaken: async (candidate) => { + if (reservedPaths.has(candidate)) { + return true + } + + const [statErr] = await tryCatch(fsp.stat(candidate)) + return statErr == null + }, + reserve: (candidate) => { + reservedPaths.add(candidate) + }, + nextValue: (counter) => path.join(parsed.dir, `${parsed.name}__${counter}${parsed.ext}`), + scope: reservedPaths, + }) +} + +function getResultFileName(file: NormalizedAssemblyResultFile): string { + return sanitizeResultName(file.name) +} + +interface AssemblyDownloadTarget { + resultUrl: string + targetPath: string | null +} + +const STALE_OUTPUT_GRACE_MS = 1000 + +function isMeaningfullyNewer(newer: Date, older: Date): boolean { + return newer.getTime() - older.getTime() > STALE_OUTPUT_GRACE_MS +} + +async function buildDirectoryDownloadTargets({ + allFiles, + baseDir, + groupByStep, + reservedPaths, +}: { + allFiles: NormalizedAssemblyResultFile[] + baseDir: string + 
groupByStep: boolean + reservedPaths: Set +}): Promise { + await fsp.mkdir(baseDir, { recursive: true }) + + const targets: AssemblyDownloadTarget[] = [] + for (const resultFile of allFiles) { + const targetDir = groupByStep ? path.join(baseDir, resultFile.stepName) : baseDir + await fsp.mkdir(targetDir, { recursive: true }) + + targets.push({ + resultUrl: resultFile.url, + targetPath: await ensureUniquePath( + path.join(targetDir, getResultFileName(resultFile)), + reservedPaths, + ), + }) + } + + return targets +} + +function getSingleResultDownloadTarget( + allFiles: NormalizedAssemblyResultFile[], + targetPath: string | null, +): AssemblyDownloadTarget[] { + const first = allFiles[0] + const resultUrl = first?.url ?? null + if (resultUrl == null) { + return [] + } + + return [{ resultUrl, targetPath }] +} + +async function resolveResultDownloadTargets({ + hasDirectoryInput, + inPath, + inputs, + normalizedResults, + outputMode, + outputPath, + outputRoot, + outputRootIsDirectory, + reservedPaths, + singleAssembly, +}: { + hasDirectoryInput: boolean + inPath: string | null + inputs: string[] + normalizedResults: NormalizedAssemblyResults + outputMode?: 'directory' | 'file' + outputPath: string | null + outputRoot: string + outputRootIsDirectory: boolean + reservedPaths: Set + singleAssembly?: boolean +}): Promise { + const { allFiles, entries } = normalizedResults + const shouldGroupByInput = + !singleAssembly && inPath != null && (hasDirectoryInput || inputs.length > 1) + + const resolveDirectoryBaseDir = (): string => { + if (!shouldGroupByInput || inPath == null) { + return outputRoot + } + + if (hasDirectoryInput && outputPath != null) { + const mappedRelative = path.relative(outputRoot, outputPath) + const mappedDir = path.dirname(mappedRelative) + const mappedStem = path.parse(mappedRelative).name + return path.join(outputRoot, mappedDir === '.' ? 
'' : mappedDir, mappedStem) + } + + return path.join(outputRoot, path.parse(path.basename(inPath)).name) + } + + if (!outputRootIsDirectory) { + if (allFiles.length > 1) { + if (outputPath == null) { + throw new Error('stdout can only receive a single result file') + } + + throw new Error('file outputs can only receive a single result file') + } + + return getSingleResultDownloadTarget(allFiles, outputPath) + } + + if (singleAssembly) { + return await buildDirectoryDownloadTargets({ + allFiles, + baseDir: outputRoot, + groupByStep: false, + reservedPaths, + }) + } + + if (outputMode === 'directory' || outputPath == null || inPath == null) { + return await buildDirectoryDownloadTargets({ + allFiles, + baseDir: resolveDirectoryBaseDir(), + groupByStep: entries.length > 1, + reservedPaths, + }) + } + + if (allFiles.length === 1) { + return getSingleResultDownloadTarget(allFiles, outputPath) + } + + return await buildDirectoryDownloadTargets({ + allFiles, + baseDir: path.join(path.dirname(outputPath), path.parse(outputPath).name), + groupByStep: true, + reservedPaths, + }) +} + +async function shouldSkipStaleOutput({ + inputPaths, + outputPath, + outputPlanMtime, + outputRootIsDirectory, + reprocessStale, + singleInputReference = 'output-plan', +}: { + inputPaths: string[] + outputPath: string | null + outputPlanMtime: Date + outputRootIsDirectory: boolean + reprocessStale?: boolean + singleInputReference?: 'input' | 'output-plan' +}): Promise { + if (reprocessStale || outputPath == null || outputRootIsDirectory) { + return false + } + + if (inputPaths.length === 0 || inputPaths.some((inputPath) => inputPath === stdinWithPath.path)) { + return false + } + + const [outputErr, outputStat] = await tryCatch(fsp.stat(outputPath)) + if (outputErr != null || outputStat == null) { + return false + } + + if (inputPaths.length === 1) { + if (singleInputReference === 'output-plan') { + return isMeaningfullyNewer(outputStat.mtime, outputPlanMtime) + } + + const [inputErr, 
inputStat] = await tryCatch(fsp.stat(inputPaths[0])) + if (inputErr != null || inputStat == null) { + return false + } + + return isMeaningfullyNewer(outputStat.mtime, inputStat.mtime) + } + + const inputStats = await Promise.all( + inputPaths.map(async (inputPath) => { + const [inputErr, inputStat] = await tryCatch(fsp.stat(inputPath)) + if (inputErr != null || inputStat == null) { + return null + } + return inputStat + }), + ) + + if (inputStats.some((inputStat) => inputStat == null)) { + return false + } + + return inputStats.every((inputStat) => { + return inputStat != null && isMeaningfullyNewer(outputStat.mtime, inputStat.mtime) + }) +} + +async function materializeAssemblyResults({ + abortSignal, + hasDirectoryInput, + inPath, + inputs, + normalizedResults, + outputMode, + outputPath, + outputRoot, + outputRootIsDirectory, + outputctl, + reservedPaths, + singleAssembly, +}: { + abortSignal: AbortSignal + hasDirectoryInput: boolean + inPath: string | null + inputs: string[] + normalizedResults: NormalizedAssemblyResults + outputMode?: 'directory' | 'file' + outputPath: string | null + outputRoot: string | null + outputRootIsDirectory: boolean + outputctl: IOutputCtl + reservedPaths: Set + singleAssembly?: boolean +}): Promise { + if (outputRoot == null) { + return + } + + const targets = await resolveResultDownloadTargets({ + hasDirectoryInput, + inPath, + inputs, + normalizedResults, + outputMode, + outputPath, + outputRoot, + outputRootIsDirectory, + reservedPaths, + singleAssembly, + }) + + for (const { resultUrl, targetPath } of targets) { + outputctl.debug('DOWNLOADING') + const [dlErr] = await tryCatch( + targetPath == null + ? 
downloadResultToStdout(resultUrl, abortSignal) + : downloadResultToFile(resultUrl, targetPath, abortSignal), + ) + if (dlErr) { + if (dlErr.name === 'AbortError') { + continue + } + outputctl.error(dlErr.message) + throw dlErr + } + } +} + class MyEventEmitter extends EventEmitter { protected hasEnded: boolean @@ -428,29 +825,25 @@ class MyEventEmitter extends EventEmitter { } class ReaddirJobEmitter extends MyEventEmitter { - constructor({ - dir, - streamRegistry, - recursive, - outstreamProvider, - topdir = dir, - }: ReaddirJobEmitterOptions) { + constructor({ dir, recursive, outputPlanProvider, topdir = dir }: ReaddirJobEmitterOptions) { super() process.nextTick(() => { - this.processDirectory({ dir, streamRegistry, recursive, outstreamProvider, topdir }).catch( - (err) => { - this.emit('error', err) - }, - ) + this.processDirectory({ + dir, + recursive, + outputPlanProvider, + topdir, + }).catch((err) => { + this.emit('error', err) + }) }) } private async processDirectory({ dir, - streamRegistry, recursive, - outstreamProvider, + outputPlanProvider, topdir, }: ReaddirJobEmitterOptions & { topdir: string }): Promise { const files = await fsp.readdir(dir) @@ -459,9 +852,7 @@ class ReaddirJobEmitter extends MyEventEmitter { for (const filename of files) { const file = path.normalize(path.join(dir, filename)) - pendingOperations.push( - this.processFile({ file, streamRegistry, recursive, outstreamProvider, topdir }), - ) + pendingOperations.push(this.processFile({ file, recursive, outputPlanProvider, topdir })) } await Promise.all(pendingOperations) @@ -470,15 +861,13 @@ class ReaddirJobEmitter extends MyEventEmitter { private async processFile({ file, - streamRegistry, recursive = false, - outstreamProvider, + outputPlanProvider, topdir, }: { file: string - streamRegistry: StreamRegistry recursive?: boolean - outstreamProvider: OutstreamProvider + outputPlanProvider: OutputPlanProvider topdir: string }): Promise { const stats = await fsp.stat(file) @@ -488,9 
+877,8 @@ class ReaddirJobEmitter extends MyEventEmitter { await new Promise((resolve, reject) => { const subdirEmitter = new ReaddirJobEmitter({ dir: file, - streamRegistry, recursive, - outstreamProvider, + outputPlanProvider, topdir, }) subdirEmitter.on('job', (job: Job) => this.emit('job', job)) @@ -499,67 +887,51 @@ class ReaddirJobEmitter extends MyEventEmitter { }) } } else { - const existing = streamRegistry[file] - if (existing) existing.end() - const outstream = await outstreamProvider(file, topdir) - streamRegistry[file] = outstream ?? undefined - const instream = fs.createReadStream(file) - // Attach a no-op error handler to prevent unhandled errors if stream is destroyed - // before being consumed (e.g., due to output collision detection) - instream.on('error', () => {}) - this.emit('job', { in: instream, out: outstream }) + const outputPlan = await outputPlanProvider(file, topdir) + this.emit('job', { inputPath: getJobInputPath(file), out: outputPlan }) } } } class SingleJobEmitter extends MyEventEmitter { - constructor({ file, streamRegistry, outstreamProvider }: SingleJobEmitterOptions) { + constructor({ file, outputPlanProvider }: SingleJobEmitterOptions) { super() const normalizedFile = path.normalize(file) - const existing = streamRegistry[normalizedFile] - if (existing) existing.end() - outstreamProvider(normalizedFile).then((outstream) => { - streamRegistry[normalizedFile] = outstream ?? 
undefined - - let instream: Readable | null - if (normalizedFile === '-') { - if (tty.isatty(process.stdin.fd)) { - instream = null - } else { - instream = process.stdin - } - } else { - instream = fs.createReadStream(normalizedFile) - // Attach a no-op error handler to prevent unhandled errors if stream is destroyed - // before being consumed (e.g., due to output collision detection) - instream.on('error', () => {}) - } - - process.nextTick(() => { - this.emit('job', { in: instream, out: outstream }) - this.emit('end') + outputPlanProvider(normalizedFile) + .then((outputPlan) => { + process.nextTick(() => { + this.emit('job', { inputPath: getJobInputPath(normalizedFile), out: outputPlan }) + this.emit('end') + }) + }) + .catch((err: unknown) => { + process.nextTick(() => { + this.emit('error', ensureError(err)) + }) }) - }) } } class InputlessJobEmitter extends MyEventEmitter { - constructor({ - outstreamProvider, - }: { streamRegistry: StreamRegistry; outstreamProvider: OutstreamProvider }) { + constructor({ outputPlanProvider }: { outputPlanProvider: OutputPlanProvider }) { super() process.nextTick(() => { - outstreamProvider(null).then((outstream) => { - try { - this.emit('job', { in: null, out: outstream }) - } catch (err) { - this.emit('error', err) - } + outputPlanProvider(null) + .then((outputPlan) => { + try { + this.emit('job', { inputPath: null, out: outputPlan }) + } catch (err) { + this.emit('error', ensureError(err)) + return + } - this.emit('end') - }) + this.emit('end') + }) + .catch((err: unknown) => { + this.emit('error', ensureError(err)) + }) }) } } @@ -574,10 +946,10 @@ class NullJobEmitter extends MyEventEmitter { class WatchJobEmitter extends MyEventEmitter { private watcher: NodeWatcher | null = null - constructor({ file, streamRegistry, recursive, outstreamProvider }: WatchJobEmitterOptions) { + constructor({ file, recursive, outputPlanProvider }: WatchJobEmitterOptions) { super() - this.init({ file, streamRegistry, recursive, 
outstreamProvider }).catch((err) => { + this.init({ file, recursive, outputPlanProvider }).catch((err) => { this.emit('error', err) }) @@ -597,9 +969,8 @@ class WatchJobEmitter extends MyEventEmitter { private async init({ file, - streamRegistry, recursive, - outstreamProvider, + outputPlanProvider, }: WatchJobEmitterOptions): Promise { const stats = await fsp.stat(file) const topdir = stats.isDirectory() ? file : undefined @@ -614,7 +985,7 @@ class WatchJobEmitter extends MyEventEmitter { this.watcher.on('close', () => this.emit('end')) this.watcher.on('change', (_evt: string, filename: string) => { const normalizedFile = path.normalize(filename) - this.handleChange(normalizedFile, topdir, streamRegistry, outstreamProvider).catch((err) => { + this.handleChange(normalizedFile, topdir, outputPlanProvider).catch((err) => { this.emit('error', err) }) }) @@ -623,23 +994,17 @@ class WatchJobEmitter extends MyEventEmitter { private async handleChange( normalizedFile: string, topdir: string | undefined, - streamRegistry: StreamRegistry, - outstreamProvider: OutstreamProvider, + outputPlanProvider: OutputPlanProvider, ): Promise { const stats = await fsp.stat(normalizedFile) if (stats.isDirectory()) return - const existing = streamRegistry[normalizedFile] - if (existing) existing.end() - - const outstream = await outstreamProvider(normalizedFile, topdir) - streamRegistry[normalizedFile] = outstream ?? 
undefined - - const instream = fs.createReadStream(normalizedFile) - // Attach a no-op error handler to prevent unhandled errors if stream is destroyed - // before being consumed (e.g., due to output collision detection) - instream.on('error', () => {}) - this.emit('job', { in: instream, out: outstream }) + const outputPlan = await outputPlanProvider(normalizedFile, topdir) + this.emit('job', { + inputPath: getJobInputPath(normalizedFile), + out: outputPlan, + watchEvent: true, + }) } } @@ -697,12 +1062,21 @@ function detectConflicts(jobEmitter: EventEmitter): MyEventEmitter { jobEmitter.on('end', () => emitter.emit('end')) jobEmitter.on('error', (err: Error) => emitter.emit('error', err)) jobEmitter.on('job', (job: Job) => { - if (job.in == null || job.out == null) { + if (job.watchEvent) { + emitter.emit('job', job) + return + } + + if (job.inputPath == null || job.out == null) { + emitter.emit('job', job) + return + } + const inPath = job.inputPath + const outPath = job.out.path + if (outPath == null) { emitter.emit('job', job) return } - const inPath = (job.in as fs.ReadStream).path as string - const outPath = job.out.path as string if (Object.hasOwn(outfileAssociations, outPath) && outfileAssociations[outPath] !== inPath) { emitter.emit( 'error', @@ -724,12 +1098,12 @@ function dismissStaleJobs(jobEmitter: EventEmitter): MyEventEmitter { jobEmitter.on('end', () => Promise.all(pendingChecks).then(() => emitter.emit('end'))) jobEmitter.on('error', (err: Error) => emitter.emit('error', err)) jobEmitter.on('job', (job: Job) => { - if (job.in == null || job.out == null) { + if (job.inputPath == null || job.out == null) { emitter.emit('job', job) return } - const inPath = (job.in as fs.ReadStream).path as string + const inPath = job.inputPath const checkPromise = fsp .stat(inPath) .then((stats) => { @@ -747,12 +1121,23 @@ function dismissStaleJobs(jobEmitter: EventEmitter): MyEventEmitter { return emitter } +function passthroughJobs(jobEmitter: EventEmitter): 
MyEventEmitter { + const emitter = new MyEventEmitter() + + jobEmitter.on('end', () => emitter.emit('end')) + jobEmitter.on('error', (err: Error) => emitter.emit('error', err)) + jobEmitter.on('job', (job: Job) => emitter.emit('job', job)) + + return emitter +} + function makeJobEmitter( inputs: string[], { + allowOutputCollisions, recursive, - outstreamProvider, - streamRegistry, + outputPlanProvider, + singleAssembly, watch: watchOption, reprocessStale, }: JobEmitterOptions, @@ -765,35 +1150,43 @@ function makeJobEmitter( async function processInputs(): Promise { for (const input of inputs) { if (input === '-') { - emitterFns.push( - () => new SingleJobEmitter({ file: input, outstreamProvider, streamRegistry }), - ) + emitterFns.push(() => new SingleJobEmitter({ file: input, outputPlanProvider })) watcherFns.push(() => new NullJobEmitter()) } else { const stats = await fsp.stat(input) if (stats.isDirectory()) { emitterFns.push( () => - new ReaddirJobEmitter({ dir: input, recursive, outstreamProvider, streamRegistry }), + new ReaddirJobEmitter({ + dir: input, + recursive, + outputPlanProvider, + }), ) watcherFns.push( () => - new WatchJobEmitter({ file: input, recursive, outstreamProvider, streamRegistry }), + new WatchJobEmitter({ + file: input, + recursive, + outputPlanProvider, + }), ) } else { - emitterFns.push( - () => new SingleJobEmitter({ file: input, outstreamProvider, streamRegistry }), - ) + emitterFns.push(() => new SingleJobEmitter({ file: input, outputPlanProvider })) watcherFns.push( () => - new WatchJobEmitter({ file: input, recursive, outstreamProvider, streamRegistry }), + new WatchJobEmitter({ + file: input, + recursive, + outputPlanProvider, + }), ) } } } if (inputs.length === 0) { - emitterFns.push(() => new InputlessJobEmitter({ outstreamProvider, streamRegistry })) + emitterFns.push(() => new InputlessJobEmitter({ outputPlanProvider })) } startEmitting() @@ -818,14 +1211,18 @@ function makeJobEmitter( emitter.emit('error', err) }) - const 
stalefilter = reprocessStale ? (x: EventEmitter) => x as MyEventEmitter : dismissStaleJobs - return stalefilter(detectConflicts(emitter)) + const conflictFilter = allowOutputCollisions ? passthroughJobs : detectConflicts + const staleFilter = reprocessStale || singleAssembly ? passthroughJobs : dismissStaleJobs + + return staleFilter(conflictFilter(emitter)) } export interface AssembliesCreateOptions { steps?: string + stepsData?: StepsInput template?: string fields?: Record + outputMode?: 'directory' | 'file' watch?: boolean recursive?: boolean inputs: string[] @@ -844,8 +1241,10 @@ export async function create( client: Transloadit, { steps, + stepsData, template, fields, + outputMode, watch: watchOption, recursive, inputs, @@ -855,35 +1254,18 @@ export async function create( singleAssembly, concurrency = DEFAULT_CONCURRENCY, }: AssembliesCreateOptions, -): Promise<{ results: unknown[]; hasFailures: boolean }> { +): Promise<{ resultUrls: ResultUrlRow[]; results: unknown[]; hasFailures: boolean }> { // Quick fix for https://github.com/transloadit/transloadify/issues/13 // Only default to stdout when output is undefined (not provided), not when explicitly null let resolvedOutput = output if (resolvedOutput === undefined && !process.stdout.isTTY) resolvedOutput = '-' // Read steps file async before entering the Promise constructor - // We use StepsInput (the input type) rather than Steps (the transformed output type) + // We use StepsInput (the input type) rather than the transformed output type // to avoid zod adding default values that the API may reject - let stepsData: StepsInput | undefined + let effectiveStepsData = stepsData if (steps) { - const stepsContent = await fsp.readFile(steps, 'utf8') - const parsed: unknown = JSON.parse(stepsContent) - // Basic structural validation: must be an object with step names as keys - if (parsed == null || typeof parsed !== 'object' || Array.isArray(parsed)) { - throw new Error('Invalid steps format: expected an object with 
step names as keys') - } - // Validate each step has a robot field - for (const [stepName, step] of Object.entries(parsed)) { - if (step == null || typeof step !== 'object' || Array.isArray(step)) { - throw new Error(`Invalid steps format: step '${stepName}' must be an object`) - } - if (!('robot' in step) || typeof (step as Record).robot !== 'string') { - throw new Error( - `Invalid steps format: step '${stepName}' must have a 'robot' string property`, - ) - } - } - stepsData = parsed as StepsInput + effectiveStepsData = await readStepsInputFile(steps) } // Determine output stat async before entering the Promise constructor @@ -891,9 +1273,19 @@ export async function create( if (resolvedOutput != null) { const [err, stat] = await tryCatch(myStat(process.stdout, resolvedOutput)) if (err && (!isErrnoException(err) || err.code !== 'ENOENT')) throw err - outstat = stat ?? { isDirectory: () => false } + outstat = + stat ?? + ({ + isDirectory: () => outputMode === 'directory', + } satisfies StatLike) + + if (outputMode === 'directory' && stat != null && !stat.isDirectory()) { + const msg = 'Output must be a directory for this command' + outputctl.error(msg) + throw new Error(msg) + } - if (!outstat.isDirectory() && inputs.length !== 0) { + if (!outstat.isDirectory() && inputs.length !== 0 && !singleAssembly) { const firstInput = inputs[0] if (firstInput) { const firstInputStat = await myStat(process.stdin, firstInput) @@ -906,333 +1298,294 @@ export async function create( } } + const inputStats = await Promise.all( + inputs.map(async (input) => { + if (input === '-') return null + return await myStat(process.stdin, input) + }), + ) + const hasDirectoryInput = inputStats.some((stat) => stat?.isDirectory() === true) + return new Promise((resolve, reject) => { const params: CreateAssemblyParams = ( - stepsData ? { steps: stepsData as CreateAssemblyParams['steps'] } : { template_id: template } + effectiveStepsData + ? 
{ steps: effectiveStepsData as CreateAssemblyParams['steps'] } + : { template_id: template } ) as CreateAssemblyParams if (fields) { params.fields = fields } - const outstreamProvider: OutstreamProvider = + const outputPlanProvider: OutputPlanProvider = resolvedOutput == null ? nullProvider() : outstat?.isDirectory() ? dirProvider(resolvedOutput) : fileProvider(resolvedOutput) - const streamRegistry: StreamRegistry = {} const emitter = makeJobEmitter(inputs, { + allowOutputCollisions: singleAssembly, + outputPlanProvider, recursive, watch: watchOption, - outstreamProvider, - streamRegistry, + singleAssembly, reprocessStale, }) // Use p-queue for concurrency management const queue = new PQueue({ concurrency }) const results: unknown[] = [] + const resultUrls: ResultUrlRow[] = [] + const reservedResultPaths = new Set() + const latestWatchJobTokenByOutputPath = new Map() let hasFailures = false + let nextWatchJobToken = 0 // AbortController to cancel all in-flight createAssembly calls when an error occurs const abortController = new AbortController() + const outputRootIsDirectory = Boolean(resolvedOutput != null && outstat?.isDirectory()) - // Helper to process a single assembly job - async function processAssemblyJob( - inPath: string | null, - outPath: string | null, - outMtime: Date | undefined, - ): Promise { - outputctl.debug(`PROCESSING JOB ${inPath ?? 'null'} ${outPath ?? 'null'}`) - - // Create fresh streams for this job - const inStream = inPath ? fs.createReadStream(inPath) : null - inStream?.on('error', () => {}) - - let superceded = false - // When writing to a file path (non-directory output), we treat finish as a supersede signal. - // Directory-output multi-download mode does not use a single shared outstream. 
- const markSupersededOnFinish = (stream: OutStream) => { - stream.on('finish', () => { - superceded = true - }) + function reserveWatchJobToken(outputPath: string | null): number | null { + if (!watchOption || outputPath == null) { + return null + } + + const token = ++nextWatchJobToken + latestWatchJobTokenByOutputPath.set(outputPath, token) + return token + } + + function isSupersededWatchJob(outputPath: string | null, token: number | null): boolean { + if (!watchOption || outputPath == null || token == null) { + return false } + return latestWatchJobTokenByOutputPath.get(outputPath) !== token + } + + function createAssemblyOptions({ + files, + uploads, + }: { + files?: Record + uploads?: Record + } = {}): CreateAssemblyOptions { const createOptions: CreateAssemblyOptions = { params, signal: abortController.signal, } - if (inStream != null) { - createOptions.uploads = { in: inStream } + if (files != null && Object.keys(files).length > 0) { + createOptions.files = files } + if (uploads != null && Object.keys(uploads).length > 0) { + createOptions.uploads = uploads + } + return createOptions + } + async function awaitCompletedAssembly(createOptions: CreateAssemblyOptions): Promise<{ + assembly: Awaited> + assemblyId: string + }> { const result = await client.createAssembly(createOptions) - if (superceded) return undefined - const assemblyId = result.assembly_id if (!assemblyId) throw new Error('No assembly_id in result') const assembly = await client.awaitAssemblyCompletion(assemblyId, { signal: abortController.signal, - onPoll: () => { - if (superceded) return false - return true - }, + onPoll: () => true, onAssemblyProgress: (status) => { outputctl.debug(`Assembly status: ${status.ok}`) }, }) - if (superceded) return undefined - if (assembly.error || (assembly.ok && assembly.ok !== 'ASSEMBLY_COMPLETED')) { const msg = `Assembly failed: ${assembly.error || assembly.message} (Status: ${assembly.ok})` outputctl.error(msg) throw new Error(msg) } + return { assembly, 
assemblyId } + } + + async function executeAssemblyLifecycle({ + createOptions, + inPath, + inputPaths, + outputPlan, + outputToken, + singleAssemblyMode, + }: { + createOptions: CreateAssemblyOptions + inPath: string | null + inputPaths: string[] + outputPlan: OutputPlan | null + outputToken: number | null + singleAssemblyMode?: boolean + }): Promise { + outputctl.debug(`PROCESSING JOB ${inPath ?? 'null'} ${outputPlan?.path ?? 'null'}`) + + const { assembly, assemblyId } = await awaitCompletedAssembly(createOptions) if (!assembly.results) throw new Error('No results in assembly') + const normalizedResults = normalizeAssemblyResults(assembly.results) - const outIsDirectory = Boolean(resolvedOutput != null && outstat?.isDirectory()) - const entries = Object.entries(assembly.results) - const allFiles: Array<{ - stepName: string - file: { name?: string; basename?: string; ext?: string; ssl_url?: string; url?: string } - }> = [] - for (const [stepName, stepResults] of entries) { - for (const file of stepResults as Array<{ - name?: string - basename?: string - ext?: string - ssl_url?: string - url?: string - }>) { - allFiles.push({ stepName, file }) - } + if (isSupersededWatchJob(outputPlan?.path ?? null, outputToken)) { + outputctl.debug( + `SKIPPED SUPERSEDED WATCH RESULT ${inPath ?? 'null'} ${outputPlan?.path ?? 'null'}`, + ) + return assembly } - const getFileUrl = (file: { ssl_url?: string; url?: string }): string | null => - file.ssl_url ?? file.url ?? null - - const sanitizeName = (value: string): string => { - const base = path.basename(value) - return base.replaceAll('\\', '_').replaceAll('/', '_').replaceAll('\u0000', '') + if ( + !singleAssemblyMode && + !watchOption && + (await shouldSkipStaleOutput({ + inputPaths, + outputPath: outputPlan?.path ?? null, + outputPlanMtime: outputPlan?.mtime ?? new Date(0), + outputRootIsDirectory, + reprocessStale, + })) + ) { + outputctl.debug(`SKIPPED STALE RESULT ${inPath ?? 'null'} ${outputPlan?.path ?? 
'null'}`) + return assembly } - const ensureUniquePath = async (targetPath: string): Promise => { - const parsed = path.parse(targetPath) - let candidate = targetPath - let counter = 1 - while (true) { - const [statErr] = await tryCatch(fsp.stat(candidate)) - if (statErr) return candidate - candidate = path.join(parsed.dir, `${parsed.name}__${counter}${parsed.ext}`) - counter += 1 - } - } + resultUrls.push(...collectNormalizedResultUrlRows({ assemblyId, normalizedResults })) + + await materializeAssemblyResults({ + abortSignal: abortController.signal, + hasDirectoryInput: singleAssemblyMode ? false : hasDirectoryInput, + inPath, + inputs: inputPaths, + normalizedResults, + outputMode, + outputPath: outputPlan?.path ?? null, + outputRoot: resolvedOutput ?? null, + outputRootIsDirectory, + outputctl, + reservedPaths: reservedResultPaths, + singleAssembly: singleAssemblyMode, + }) - if (resolvedOutput != null && !superceded) { - // Directory output: - // - For single-result, input-backed jobs, preserve existing behavior (write to mapped file path). - // - Otherwise (multi-result or inputless), download all results into a directory structure. - if (outIsDirectory && (inPath == null || allFiles.length !== 1 || outPath == null)) { - let baseDir = resolvedOutput - if (inPath != null) { - let relpath = path.relative(process.cwd(), inPath) - relpath = relpath.replace(/^(\.\.\/)+/, '') - baseDir = path.join(resolvedOutput, path.dirname(relpath), path.parse(relpath).name) - } - await fsp.mkdir(baseDir, { recursive: true }) - - for (const { stepName, file } of allFiles) { - const resultUrl = getFileUrl(file) - if (!resultUrl) continue - - const stepDir = path.join(baseDir, stepName) - await fsp.mkdir(stepDir, { recursive: true }) - - const rawName = - file.name ?? - (file.basename && file.ext ? `${file.basename}.${file.ext}` : undefined) ?? 
- `${stepName}_result` - const safeName = sanitizeName(rawName) - const targetPath = await ensureUniquePath(path.join(stepDir, safeName)) - - outputctl.debug('DOWNLOADING') - const outStream = fs.createWriteStream(targetPath) as OutStream - outStream.on('error', () => {}) - const [dlErr] = await tryCatch( - pipeline(got.stream(resultUrl, { signal: abortController.signal }), outStream), - ) - if (dlErr) { - if (dlErr.name === 'AbortError') continue - outputctl.error(dlErr.message) - throw dlErr - } - } - } else if (!outIsDirectory && outPath != null) { - const first = allFiles[0] - const resultUrl = first ? getFileUrl(first.file) : null - if (resultUrl) { - outputctl.debug('DOWNLOADING') - const outStream = fs.createWriteStream(outPath) as OutStream - outStream.on('error', () => {}) - outStream.mtime = outMtime - markSupersededOnFinish(outStream) - - const [dlErr] = await tryCatch( - pipeline(got.stream(resultUrl, { signal: abortController.signal }), outStream), - ) - if (dlErr) { - if (dlErr.name !== 'AbortError') { - outputctl.error(dlErr.message) - throw dlErr - } - } - } - } else if (outIsDirectory && outPath != null) { - // Single-result, input-backed job: preserve existing file mapping in outdir. - const first = allFiles[0] - const resultUrl = first ? getFileUrl(first.file) : null - if (resultUrl) { - outputctl.debug('DOWNLOADING') - const outStream = fs.createWriteStream(outPath) as OutStream - outStream.on('error', () => {}) - outStream.mtime = outMtime - markSupersededOnFinish(outStream) - - const [dlErr] = await tryCatch( - pipeline(got.stream(resultUrl, { signal: abortController.signal }), outStream), - ) - if (dlErr) { - if (dlErr.name !== 'AbortError') { - outputctl.error(dlErr.message) - throw dlErr - } - } + outputctl.debug(`COMPLETED ${inPath ?? 'null'} ${outputPlan?.path ?? 
'null'}`) + + if (del) { + for (const inputPath of inputPaths) { + if (inputPath === stdinWithPath.path) { + continue } + await fsp.unlink(inputPath) } } + return assembly + } - outputctl.debug(`COMPLETED ${inPath ?? 'null'} ${outPath ?? 'null'}`) + // Helper to process a single assembly job + async function processAssemblyJob( + inPath: string | null, + outputPlan: OutputPlan | null, + outputToken: number | null, + ): Promise { + const files = + inPath != null && inPath !== stdinWithPath.path + ? { + in: inPath, + } + : undefined + const uploads = + inPath === stdinWithPath.path + ? { + in: createInputUploadStream(inPath), + } + : undefined + + return await executeAssemblyLifecycle({ + createOptions: createAssemblyOptions({ files, uploads }), + inPath, + inputPaths: inPath == null ? [] : [inPath], + outputPlan, + outputToken, + }) + } - if (del && inPath) { - await fsp.unlink(inPath) - } - return assembly + function handleEmitterError(err: Error): void { + abortController.abort() + queue.clear() + outputctl.error(err) + reject(err) } - if (singleAssembly) { - // Single-assembly mode: collect file paths, then create one assembly with all inputs - // We close streams immediately to avoid exhausting file descriptors with many files + function runSingleAssemblyEmitter(): void { const collectedPaths: string[] = [] + let inputlessOutputPlan: OutputPlan | null = null emitter.on('job', (job: Job) => { - if (job.in != null) { - const inPath = (job.in as fs.ReadStream).path as string + if (job.inputPath != null) { + const inPath = job.inputPath outputctl.debug(`COLLECTING JOB ${inPath}`) collectedPaths.push(inPath) - // Close the stream immediately to avoid file descriptor exhaustion - ;(job.in as fs.ReadStream).destroy() - outputctl.debug(`STREAM CLOSED ${inPath}`) + return } - }) - emitter.on('error', (err: Error) => { - abortController.abort() - queue.clear() - outputctl.error(err) - reject(err) + inputlessOutputPlan = job.out ?? 
null }) emitter.on('end', async () => { - if (collectedPaths.length === 0) { - resolve({ results: [], hasFailures: false }) + if ( + await shouldSkipStaleOutput({ + inputPaths: collectedPaths, + outputPath: resolvedOutput ?? null, + outputPlanMtime: new Date(0), + outputRootIsDirectory, + reprocessStale, + singleInputReference: 'input', + }) + ) { + outputctl.debug(`SKIPPED STALE SINGLE ASSEMBLY ${resolvedOutput ?? 'null'}`) + resolve({ resultUrls, results: [], hasFailures: false }) return } - // Build uploads object, creating fresh streams for each file + // Preserve original basenames/extensions for filesystem uploads so the backend + // can infer types like Markdown correctly. + const files: Record = {} const uploads: Record = {} const inputPaths: string[] = [] for (const inPath of collectedPaths) { const basename = path.basename(inPath) - let key = basename - let counter = 1 - while (key in uploads) { - key = `${path.parse(basename).name}_${counter}${path.parse(basename).ext}` - counter++ + const collection = inPath === stdinWithPath.path ? 
uploads : files + const key = await ensureUniqueCounterValue({ + initialValue: basename, + isTaken: (candidate) => candidate in collection, + nextValue: (counter) => + `${path.parse(basename).name}_${counter}${path.parse(basename).ext}`, + reserve: () => {}, + scope: collection, + }) + if (inPath === stdinWithPath.path) { + uploads[key] = createInputUploadStream(inPath) + } else { + files[key] = inPath } - uploads[key] = fs.createReadStream(inPath) inputPaths.push(inPath) } - outputctl.debug(`Creating single assembly with ${Object.keys(uploads).length} files`) + outputctl.debug( + `Creating single assembly with ${Object.keys(files).length + Object.keys(uploads).length} files`, + ) try { const assembly = await queue.add(async () => { - const createOptions: CreateAssemblyOptions = { - params, - signal: abortController.signal, - } - if (Object.keys(uploads).length > 0) { - createOptions.uploads = uploads - } - - const result = await client.createAssembly(createOptions) - const assemblyId = result.assembly_id - if (!assemblyId) throw new Error('No assembly_id in result') - - const asm = await client.awaitAssemblyCompletion(assemblyId, { - signal: abortController.signal, - onAssemblyProgress: (status) => { - outputctl.debug(`Assembly status: ${status.ok}`) - }, + return await executeAssemblyLifecycle({ + createOptions: createAssemblyOptions({ files, uploads }), + inPath: null, + inputPaths, + outputPlan: + inputlessOutputPlan ?? + (resolvedOutput == null ? 
null : createOutputPlan(resolvedOutput, new Date(0))), + outputToken: null, + singleAssemblyMode: true, }) - - if (asm.error || (asm.ok && asm.ok !== 'ASSEMBLY_COMPLETED')) { - const msg = `Assembly failed: ${asm.error || asm.message} (Status: ${asm.ok})` - outputctl.error(msg) - throw new Error(msg) - } - - // Download all results - if (asm.results && resolvedOutput != null) { - for (const [stepName, stepResults] of Object.entries(asm.results)) { - for (const stepResult of stepResults) { - const resultUrl = - (stepResult as { ssl_url?: string; url?: string }).ssl_url ?? stepResult.url - if (!resultUrl) continue - - let outPath: string - if (outstat?.isDirectory()) { - outPath = path.join(resolvedOutput, stepResult.name || `${stepName}_result`) - } else { - outPath = resolvedOutput - } - - outputctl.debug(`DOWNLOADING ${stepResult.name} to ${outPath}`) - const [dlErr] = await tryCatch( - pipeline( - got.stream(resultUrl, { signal: abortController.signal }), - fs.createWriteStream(outPath), - ), - ) - if (dlErr) { - if (dlErr.name === 'AbortError') continue - outputctl.error(dlErr.message) - throw dlErr - } - } - } - } - - // Delete input files if requested - if (del) { - for (const inPath of inputPaths) { - await fsp.unlink(inPath) - } - } - return asm }) results.push(assembly) } catch (err) { @@ -1240,30 +1593,19 @@ export async function create( outputctl.error(err as Error) } - resolve({ results, hasFailures }) + resolve({ resultUrls, results, hasFailures }) }) - } else { - // Default mode: one assembly per file with p-queue concurrency limiting - emitter.on('job', (job: Job) => { - const inPath = job.in - ? (((job.in as fs.ReadStream).path as string | undefined) ?? null) - : null - const outPath = job.out?.path ?? null - const outMtime = job.out?.mtime - outputctl.debug(`GOT JOB ${inPath ?? 'null'} ${outPath ?? 
'null'}`) - - // Close the original streams immediately - we'll create fresh ones when processing - if (job.in != null) { - ;(job.in as fs.ReadStream).destroy() - } - if (job.out != null) { - job.out.destroy() - } + } - // Add job to queue - p-queue handles concurrency automatically + function runPerFileEmitter(): void { + emitter.on('job', (job: Job) => { + const inPath = job.inputPath + const outputPlan = job.out + const outputToken = reserveWatchJobToken(outputPlan?.path ?? null) + outputctl.debug(`GOT JOB ${inPath ?? 'null'} ${outputPlan?.path ?? 'null'}`) queue .add(async () => { - const result = await processAssemblyJob(inPath, outPath, outMtime) + const result = await processAssemblyJob(inPath, outputPlan, outputToken) if (result !== undefined) { results.push(result) } @@ -1274,19 +1616,19 @@ export async function create( }) }) - emitter.on('error', (err: Error) => { - abortController.abort() - queue.clear() - outputctl.error(err) - reject(err) - }) - emitter.on('end', async () => { - // Wait for all queued jobs to complete await queue.onIdle() - resolve({ results, hasFailures }) + resolve({ resultUrls, results, hasFailures }) }) } + + emitter.on('error', handleEmitterError) + + if (singleAssembly) { + runSingleAssemblyEmitter() + } else { + runPerFileEmitter() + } }) } @@ -1330,9 +1672,7 @@ export class AssembliesCreateCommand extends AuthenticatedCommand { description: 'Specify a template to use for these assemblies', }) - inputs = Option.Array('--input,-i', { - description: 'Provide an input file or a directory', - }) + inputs = inputPathsOption() outputPath = Option.String('--output,-o', { description: 'Specify an output file or directory', @@ -1342,30 +1682,19 @@ export class AssembliesCreateCommand extends AuthenticatedCommand { description: 'Set a template field (KEY=VAL)', }) - watch = Option.Boolean('--watch,-w', false, { - description: 'Watch inputs for changes', - }) + watch = watchOption() - recursive = Option.Boolean('--recursive,-r', false, { - 
description: 'Enumerate input directories recursively', - }) + recursive = recursiveOption() - deleteAfterProcessing = Option.Boolean('--delete-after-processing,-d', false, { - description: 'Delete input files after they are processed', - }) + deleteAfterProcessing = deleteAfterProcessingOption() - reprocessStale = Option.Boolean('--reprocess-stale', false, { - description: 'Process inputs even if output is newer', - }) + reprocessStale = reprocessStaleOption() - singleAssembly = Option.Boolean('--single-assembly', false, { - description: 'Pass all input files to a single assembly instead of one assembly per file', - }) + singleAssembly = singleAssemblyOption() - concurrency = Option.String('--concurrency,-c', { - description: 'Maximum number of concurrent assemblies (default: 5)', - validator: t.isNumber(), - }) + concurrency = concurrencyOption() + + printUrls = printUrlsOption() protected async run(): Promise { if (!this.steps && !this.template) { @@ -1378,10 +1707,6 @@ export class AssembliesCreateCommand extends AuthenticatedCommand { } const inputList = this.inputs ?? [] - if (inputList.length === 0 && this.watch) { - this.output.error('assemblies create --watch requires at least one input') - return 1 - } // Default to stdin only for `--steps` mode (common "pipe a file into a one-off assembly" use case). // For `--template` mode, templates may be inputless or use /http/import, so stdin should be explicit (`--input -`). @@ -1389,27 +1714,26 @@ export class AssembliesCreateCommand extends AuthenticatedCommand { inputList.push('-') } - const fieldsMap: Record = {} - for (const field of this.fields ?? 
[]) { - const eqIndex = field.indexOf('=') - if (eqIndex === -1) { - this.output.error(`invalid argument for --field: '${field}'`) - return 1 - } - const key = field.slice(0, eqIndex) - const value = field.slice(eqIndex + 1) - fieldsMap[key] = value + const fieldsMap = parseTemplateFieldAssignments(this.output, this.fields) + if (this.fields != null && fieldsMap == null) { + return 1 } - if (this.singleAssembly && this.watch) { - this.output.error('--single-assembly cannot be used with --watch') + const sharedValidationError = validateSharedFileProcessingOptions({ + explicitInputCount: this.inputs?.length ?? 0, + singleAssembly: this.singleAssembly, + watch: this.watch, + watchRequiresInputsMessage: 'assemblies create --watch requires at least one input', + }) + if (sharedValidationError != null) { + this.output.error(sharedValidationError) return 1 } - const { hasFailures } = await create(this.output, this.client, { + const { hasFailures, resultUrls } = await create(this.output, this.client, { steps: this.steps, template: this.template, - fields: fieldsMap, + fields: fieldsMap ?? {}, watch: this.watch, recursive: this.recursive, inputs: inputList, @@ -1417,8 +1741,11 @@ export class AssembliesCreateCommand extends AuthenticatedCommand { del: this.deleteAfterProcessing, reprocessStale: this.reprocessStale, singleAssembly: this.singleAssembly, - concurrency: this.concurrency, + concurrency: this.concurrency == null ? undefined : Number(this.concurrency), }) + if (this.printUrls) { + printResultUrls(this.output, resultUrls) + } return hasFailures ? 1 : undefined } } @@ -1567,20 +1894,13 @@ export class AssembliesReplayCommand extends AuthenticatedCommand { assemblyIds = Option.Rest({ required: 1 }) protected async run(): Promise { - const fieldsMap: Record = {} - for (const field of this.fields ?? 
[]) { - const eqIndex = field.indexOf('=') - if (eqIndex === -1) { - this.output.error(`invalid argument for --field: '${field}'`) - return 1 - } - const key = field.slice(0, eqIndex) - const value = field.slice(eqIndex + 1) - fieldsMap[key] = value + const fieldsMap = parseTemplateFieldAssignments(this.output, this.fields) + if (this.fields != null && fieldsMap == null) { + return 1 } await replay(this.output, this.client, { - fields: fieldsMap, + fields: fieldsMap ?? {}, reparse: this.reparseTemplate, steps: this.steps, notify_url: this.notifyUrl, diff --git a/packages/node/src/cli/commands/index.ts b/packages/node/src/cli/commands/index.ts index 8f048784..b76456ee 100644 --- a/packages/node/src/cli/commands/index.ts +++ b/packages/node/src/cli/commands/index.ts @@ -1,7 +1,7 @@ import { Builtins, Cli } from 'clipanion' import packageJson from '../../../package.json' with { type: 'json' } - +import { intentCommands } from '../intentCommands.ts' import { AssembliesCreateCommand, AssembliesDeleteCommand, @@ -10,9 +10,7 @@ import { AssembliesListCommand, AssembliesReplayCommand, } from './assemblies.ts' - import { SignatureCommand, SmartCdnSignatureCommand, TokenCommand } from './auth.ts' - import { BillsGetCommand } from './bills.ts' import { DocsRobotsGetCommand, DocsRobotsListCommand } from './docs.ts' import { NotificationsReplayCommand } from './notifications.ts' @@ -71,5 +69,10 @@ export function createCli(): Cli { cli.register(DocsRobotsListCommand) cli.register(DocsRobotsGetCommand) + // Intent-first commands + for (const command of intentCommands) { + cli.register(command) + } + return cli } diff --git a/packages/node/src/cli/commands/templates.ts b/packages/node/src/cli/commands/templates.ts index a2f2bffe..031649b1 100644 --- a/packages/node/src/cli/commands/templates.ts +++ b/packages/node/src/cli/commands/templates.ts @@ -5,12 +5,11 @@ import { Command, Option } from 'clipanion' import rreaddir from 'recursive-readdir' import { z } from 'zod' import { 
tryCatch } from '../../alphalib/tryCatch.ts' -import type { Steps } from '../../alphalib/types/template.ts' -import { stepsSchema } from '../../alphalib/types/template.ts' import type { TemplateContent } from '../../apiTypes.ts' import type { Transloadit } from '../../Transloadit.ts' import { createReadStream, formatAPIError, streamToBuffer } from '../helpers.ts' import type { IOutputCtl } from '../OutputCtl.ts' +import { parseStepsInputJson } from '../stepsInput.ts' import ModifiedLookup from '../template-last-modified.ts' import type { TemplateFile } from '../types.ts' import { ensureError, isTransloaditAPIError, TemplateFileDataSchema } from '../types.ts' @@ -60,16 +59,11 @@ export async function create( try { const buf = await streamToBuffer(createReadStream(file)) - const parsed: unknown = JSON.parse(buf.toString()) - const validated = stepsSchema.safeParse(parsed) - if (!validated.success) { - throw new Error(`Invalid template steps format: ${validated.error.message}`) - } + const steps = parseStepsInputJson(buf.toString()) const result = await client.createTemplate({ name, - // Steps (validated) is assignable to StepsInput at runtime; cast for TS - template: { steps: validated.data } as TemplateContent, + template: { steps } as TemplateContent, }) output.print(result.id, result) return result @@ -106,23 +100,18 @@ export async function modify( try { const buf = await streamToBuffer(createReadStream(file)) - let steps: Steps | null = null + let steps: TemplateContent['steps'] | null = null let newName = name if (buf.length > 0) { - const parsed: unknown = JSON.parse(buf.toString()) - const validated = stepsSchema.safeParse(parsed) - if (!validated.success) { - throw new Error(`Invalid template steps format: ${validated.error.message}`) - } - steps = validated.data + steps = parseStepsInputJson(buf.toString()) as TemplateContent['steps'] } if (!name || buf.length === 0) { const tpl = await client.getTemplate(template) if (!name) newName = tpl.name if 
(buf.length === 0 && tpl.content.steps) { - steps = tpl.content.steps + steps = tpl.content.steps as TemplateContent['steps'] } } diff --git a/packages/node/src/cli/fileProcessingOptions.ts b/packages/node/src/cli/fileProcessingOptions.ts new file mode 100644 index 00000000..0dd4716a --- /dev/null +++ b/packages/node/src/cli/fileProcessingOptions.ts @@ -0,0 +1,294 @@ +import { Option } from 'clipanion' +import * as t from 'typanion' + +export interface SharedCliOptionDocumentation { + description: string + example: string + flags: string + required: string + type: string +} + +interface SharedCliOptionDefinition { + docs: SharedCliOptionDocumentation + optionFlags: string +} + +interface SharedCliOptionExports { + docs: (description?: string) => SharedCliOptionDocumentation + option: (description?: string) => T +} + +export interface SharedFileProcessingValidationInput { + explicitInputCount: number + singleAssembly: boolean + watch: boolean + watchRequiresInputsMessage: string +} + +const inputPathsOptionDefinition = { + docs: { + flags: '--input, -i', + type: 'path | dir | url | -', + required: 'varies', + example: 'input.file', + description: 'Provide an input path, directory, URL, or - for stdin', + }, + optionFlags: '--input,-i', +} as const satisfies SharedCliOptionDefinition + +const recursiveOptionDefinition = { + docs: { + flags: '--recursive, -r', + type: 'boolean', + required: 'no', + example: 'false', + description: 'Enumerate input directories recursively', + }, + optionFlags: '--recursive,-r', +} as const satisfies SharedCliOptionDefinition + +const deleteAfterProcessingOptionDefinition = { + docs: { + flags: '--delete-after-processing, -d', + type: 'boolean', + required: 'no', + example: 'false', + description: 'Delete input files after they are processed', + }, + optionFlags: '--delete-after-processing,-d', +} as const satisfies SharedCliOptionDefinition + +const reprocessStaleOptionDefinition = { + docs: { + flags: '--reprocess-stale', + type: 
'boolean', + required: 'no', + example: 'false', + description: 'Process inputs even if output is newer', + }, + optionFlags: '--reprocess-stale', +} as const satisfies SharedCliOptionDefinition + +const watchOptionDefinition = { + docs: { + flags: '--watch, -w', + type: 'boolean', + required: 'no', + example: 'false', + description: 'Watch inputs for changes', + }, + optionFlags: '--watch,-w', +} as const satisfies SharedCliOptionDefinition + +const singleAssemblyOptionDefinition = { + docs: { + flags: '--single-assembly', + type: 'boolean', + required: 'no', + example: 'false', + description: 'Pass all input files to a single assembly instead of one assembly per file', + }, + optionFlags: '--single-assembly', +} as const satisfies SharedCliOptionDefinition + +const concurrencyOptionDefinition = { + docs: { + flags: '--concurrency, -c', + type: 'number', + required: 'no', + example: '5', + description: 'Maximum number of concurrent assemblies (default: 5)', + }, + optionFlags: '--concurrency,-c', +} as const satisfies SharedCliOptionDefinition + +const printUrlsOptionDefinition = { + docs: { + flags: '--print-urls', + type: 'boolean', + required: 'no', + example: 'false', + description: 'Print temporary result URLs after completion', + }, + optionFlags: '--print-urls', +} as const satisfies SharedCliOptionDefinition + +function getSharedCliOptionDocumentation( + definition: SharedCliOptionDefinition, + description = definition.docs.description, +): SharedCliOptionDocumentation { + return { + ...definition.docs, + description, + } +} + +function arrayOption( + definition: SharedCliOptionDefinition, + description = definition.docs.description, +): string[] { + return Option.Array(definition.optionFlags, { + description, + }) as unknown as string[] +} + +function booleanOption( + definition: SharedCliOptionDefinition, + description = definition.docs.description, +): boolean { + return Option.Boolean(definition.optionFlags, false, { + description, + }) as unknown as 
boolean +} + +function createArrayOptionExports( + definition: SharedCliOptionDefinition, +): SharedCliOptionExports { + return { + docs: (description = definition.docs.description) => + getSharedCliOptionDocumentation(definition, description), + option: (description = definition.docs.description) => arrayOption(definition, description), + } +} + +function createBooleanOptionExports( + definition: SharedCliOptionDefinition, +): SharedCliOptionExports { + return { + docs: (description = definition.docs.description) => + getSharedCliOptionDocumentation(definition, description), + option: (description = definition.docs.description) => booleanOption(definition, description), + } +} + +const inputPathsOptionExports = createArrayOptionExports(inputPathsOptionDefinition) +const recursiveOptionExports = createBooleanOptionExports(recursiveOptionDefinition) +const deleteAfterProcessingOptionExports = createBooleanOptionExports( + deleteAfterProcessingOptionDefinition, +) +const reprocessStaleOptionExports = createBooleanOptionExports(reprocessStaleOptionDefinition) +const watchOptionExports = createBooleanOptionExports(watchOptionDefinition) +const singleAssemblyOptionExports = createBooleanOptionExports(singleAssemblyOptionDefinition) + +export function getInputPathsOptionDocumentation( + description = inputPathsOptionDefinition.docs.description, +): SharedCliOptionDocumentation { + return inputPathsOptionExports.docs(description) +} + +export function inputPathsOption( + description = inputPathsOptionDefinition.docs.description, +): string[] { + return inputPathsOptionExports.option(description) +} + +export function getRecursiveOptionDocumentation( + description = recursiveOptionDefinition.docs.description, +): SharedCliOptionDocumentation { + return recursiveOptionExports.docs(description) +} + +export function recursiveOption(description = recursiveOptionDefinition.docs.description): boolean { + return recursiveOptionExports.option(description) +} + +export function 
getDeleteAfterProcessingOptionDocumentation( + description = deleteAfterProcessingOptionDefinition.docs.description, +): SharedCliOptionDocumentation { + return deleteAfterProcessingOptionExports.docs(description) +} + +export function deleteAfterProcessingOption( + description = deleteAfterProcessingOptionDefinition.docs.description, +): boolean { + return deleteAfterProcessingOptionExports.option(description) +} + +export function getReprocessStaleOptionDocumentation( + description = reprocessStaleOptionDefinition.docs.description, +): SharedCliOptionDocumentation { + return reprocessStaleOptionExports.docs(description) +} + +export function reprocessStaleOption( + description = reprocessStaleOptionDefinition.docs.description, +): boolean { + return reprocessStaleOptionExports.option(description) +} + +export function getWatchOptionDocumentation( + description = watchOptionDefinition.docs.description, +): SharedCliOptionDocumentation { + return watchOptionExports.docs(description) +} + +export function watchOption(description = watchOptionDefinition.docs.description): boolean { + return watchOptionExports.option(description) +} + +export function getSingleAssemblyOptionDocumentation( + description = singleAssemblyOptionDefinition.docs.description, +): SharedCliOptionDocumentation { + return singleAssemblyOptionExports.docs(description) +} + +export function singleAssemblyOption( + description = singleAssemblyOptionDefinition.docs.description, +): boolean { + return singleAssemblyOptionExports.option(description) +} + +export function getConcurrencyOptionDocumentation( + description = concurrencyOptionDefinition.docs.description, +): SharedCliOptionDocumentation { + return getSharedCliOptionDocumentation(concurrencyOptionDefinition, description) +} + +export function concurrencyOption( + description = concurrencyOptionDefinition.docs.description, +): number | undefined { + return Option.String(concurrencyOptionDefinition.optionFlags, { + description, + validator: 
t.applyCascade(t.isNumber(), [t.isAtLeast(1)]), + }) as unknown as number | undefined +} + +export function getPrintUrlsOptionDocumentation( + description = printUrlsOptionDefinition.docs.description, +): SharedCliOptionDocumentation { + return getSharedCliOptionDocumentation(printUrlsOptionDefinition, description) +} + +export function printUrlsOption(description = printUrlsOptionDefinition.docs.description): boolean { + return Option.Boolean(printUrlsOptionDefinition.optionFlags, { + description, + }) as unknown as boolean +} + +export function countProvidedInputs({ + inputBase64, + inputs, +}: { + inputBase64?: string[] + inputs?: string[] +}): number { + return (inputs ?? []).length + (inputBase64 ?? []).length +} + +export function validateSharedFileProcessingOptions({ + explicitInputCount, + singleAssembly, + watch, + watchRequiresInputsMessage, +}: SharedFileProcessingValidationInput): string | undefined { + if (watch && explicitInputCount === 0) { + return watchRequiresInputsMessage + } + + if (watch && singleAssembly) { + return '--single-assembly cannot be used with --watch' + } + + return undefined +} diff --git a/packages/node/src/cli/generateIntentDocs.ts b/packages/node/src/cli/generateIntentDocs.ts new file mode 100644 index 00000000..878f18b3 --- /dev/null +++ b/packages/node/src/cli/generateIntentDocs.ts @@ -0,0 +1,419 @@ +import { mkdir, readFile, writeFile } from 'node:fs/promises' +import { dirname } from 'node:path' +import { + getConcurrencyOptionDocumentation, + getDeleteAfterProcessingOptionDocumentation, + getInputPathsOptionDocumentation, + getPrintUrlsOptionDocumentation, + getRecursiveOptionDocumentation, + getReprocessStaleOptionDocumentation, + getSingleAssemblyOptionDocumentation, + getWatchOptionDocumentation, +} from './fileProcessingOptions.ts' +import type { IntentDefinition } from './intentCommandSpecs.ts' +import type { ResolvedIntentCommandDefinition } from './intentCommands.ts' +import { resolveIntentCommandDefinitions } from 
'./intentCommands.ts' +import type { IntentOptionDefinition } from './intentRuntime.ts' +import { getInputBase64OptionDocumentation, getIntentOptionDefinitions } from './intentRuntime.ts' + +interface DocOptionRow { + description: string + example: string + flags: string + required: string + type: string +} + +const MAX_OPTION_DESCRIPTION_LENGTH = 180 + +function inlineCode(value: string): string { + return `\`${value.replaceAll('`', '\\`')}\`` +} + +function escapeTableCell(value: string): string { + return value.replaceAll('\n', ' ').replaceAll('|', '\\|') +} + +function renderTable(headers: string[], rows: string[][]): string { + const renderedRows = rows.map((row) => `| ${row.map(escapeTableCell).join(' | ')} |`) + return [ + `| ${headers.join(' | ')} |`, + `| ${headers.map(() => '---').join(' | ')} |`, + ...renderedRows, + ].join('\n') +} + +function sanitizeDocsMarkdown(value: string): string { + return value + .replace(/!?\[([^\]]+)\]\([^)]+\)/g, '$1') + .replace(/<[^>]+>/g, ' ') + .replace(/```[\s\S]*?```/g, ' ') + .replace(/\{\{[\s\S]*?\}\}/g, ' ') + .replaceAll('`', '') + .replace(/\s+/g, ' ') + .trim() +} + +function truncateAtSentenceBoundary(value: string, maxLength: number): string { + if (value.length <= maxLength) { + return value + } + + const sentenceMatch = value.match(/^(.{1,180}?[.!?])(?:\s|$)/) + if (sentenceMatch?.[1] != null && sentenceMatch[1].length >= 60) { + return sentenceMatch[1] + } + + const truncated = value.slice(0, maxLength).trimEnd() + const lastSpace = truncated.lastIndexOf(' ') + if (lastSpace > 40) { + return `${truncated.slice(0, lastSpace)}…` + } + + return `${truncated}…` +} + +function summarizeDescription(value: string | undefined): string { + if (value == null || value.trim().length === 0) { + return '—' + } + + const sanitized = sanitizeDocsMarkdown(value) + + if (sanitized.length === 0) { + return '—' + } + + return truncateAtSentenceBoundary(sanitized, MAX_OPTION_DESCRIPTION_LENGTH) +} + +function 
getInputSummary(definition: ResolvedIntentCommandDefinition): string { + if (definition.runnerKind === 'no-input') { + return 'none' + } + + return 'file, dir, URL, base64' +} + +function getOutputSummary(definition: ResolvedIntentCommandDefinition): string { + return definition.intentDefinition.outputMode === 'directory' ? 'directory' : 'file' +} + +function getExecutionSummary(definition: ResolvedIntentCommandDefinition): string { + switch (definition.runnerKind) { + case 'bundled': + return 'single assembly' + case 'no-input': + return 'no input' + case 'standard': + return 'per-file; supports `--single-assembly` and `--watch`' + case 'watchable': + return 'per-file; supports `--watch`' + } +} + +function getBackendSummary(catalogDefinition: IntentDefinition): string { + if (catalogDefinition.kind === 'robot') { + return inlineCode(catalogDefinition.robot) + } + + if (catalogDefinition.kind === 'template') { + return inlineCode(catalogDefinition.templateId) + } + + return `semantic alias ${inlineCode(catalogDefinition.semantic)}` +} + +function getUsage(definition: ResolvedIntentCommandDefinition): string { + const parts = ['npx transloadit', ...definition.paths] + if (definition.runnerKind !== 'no-input') { + parts.push('--input', '') + } + parts.push('[options]') + return parts.join(' ') +} + +function formatOptionType(kind: IntentOptionDefinition['kind']): string { + switch (kind) { + case 'auto': + return 'auto' + case 'boolean': + return 'boolean' + case 'json': + return 'json' + case 'number': + return 'number' + case 'string': + return 'string' + case 'string-array': + return 'string[]' + } +} + +function getExampleValue(field: IntentOptionDefinition): string { + const candidate = field.exampleValue + if (typeof candidate === 'string' && candidate.length > 0) { + return candidate + } + + return '—' +} + +function getCommandOptionRows(definition: ResolvedIntentCommandDefinition): DocOptionRow[] { + return 
getIntentOptionDefinitions(definition.intentDefinition).map((field) => ({ + flags: field.optionFlags, + type: formatOptionType(field.kind), + required: field.required ? 'yes' : 'no', + example: getExampleValue(field), + description: summarizeDescription(field.description), + })) +} + +function getSharedFileInputOutputRows(): DocOptionRow[] { + return [ + getInputPathsOptionDocumentation(), + getInputBase64OptionDocumentation(), + { + flags: '--out, -o', + type: 'path', + required: 'yes*', + example: 'output.file', + description: 'Write the result to this path or directory', + }, + getPrintUrlsOptionDocumentation(), + ] +} + +function getSharedNoInputOutputRows(): DocOptionRow[] { + return [ + { + flags: '--out, -o', + type: 'path', + required: 'yes*', + example: 'output.file', + description: 'Write the result to this path', + }, + getPrintUrlsOptionDocumentation(), + ] +} + +function getSharedProcessingRows(): DocOptionRow[] { + return [ + getRecursiveOptionDocumentation(), + getDeleteAfterProcessingOptionDocumentation(), + getReprocessStaleOptionDocumentation(), + ] +} + +function getSharedWatchRows(): DocOptionRow[] { + return [getWatchOptionDocumentation(), getConcurrencyOptionDocumentation()] +} + +function getSharedBundlingRows(): DocOptionRow[] { + return [getSingleAssemblyOptionDocumentation()] +} + +function getSharedFlagSupportNotes(definition: ResolvedIntentCommandDefinition): string[] { + if (definition.runnerKind === 'no-input') { + return ['Uses the shared output flags listed above.'] + } + + const notes = ['Uses the shared file input and output flags listed above.'] + const processingGroups = ['base processing flags'] + + if (definition.runnerKind === 'standard' || definition.runnerKind === 'watchable') { + processingGroups.push('watch flags') + } + + if (definition.runnerKind === 'standard') { + processingGroups.push('bundling flags') + } + + notes.push(`Also supports the shared ${processingGroups.join(', ')} listed above.`) + + return notes +} + 
+function renderOptionSection(title: string, rows: DocOptionRow[]): string[] { + if (rows.length === 0) { + return [] + } + + return [ + `**${title}**`, + '', + renderTable( + ['Flag', 'Type', 'Required', 'Example', 'Description'], + rows.map((row) => [ + inlineCode(row.flags), + inlineCode(row.type), + row.required, + row.example === '—' ? row.example : inlineCode(row.example), + row.description, + ]), + ), + '', + ] +} + +function renderExamples(examples: Array<[string, string]>): string { + const lines: string[] = ['```bash'] + + for (const [label, command] of examples) { + if (examples.length > 1 || label !== 'Run the command') { + lines.push(`# ${label}`) + } + lines.push(command) + } + + lines.push('```') + return lines.join('\n') +} + +function renderIntentSection( + definition: ResolvedIntentCommandDefinition, + headingLevel: number, +): string { + const heading = '#'.repeat(headingLevel) + const commandLabel = definition.paths.join(' ') + const lines: string[] = [ + `${heading} ${inlineCode(commandLabel)}`, + '', + definition.description, + '', + definition.details, + '', + '**Usage**', + '', + '```bash', + getUsage(definition), + '```', + '', + '**Quick facts**', + '', + `- Input: ${getInputSummary(definition)}`, + `- Output: ${getOutputSummary(definition)}`, + `- Execution: ${getExecutionSummary(definition)}`, + `- Backend: ${getBackendSummary(definition.catalogDefinition)}`, + '', + '**Shared flags**', + '', + ...getSharedFlagSupportNotes(definition).map((note) => `- ${note}`), + '', + ...renderOptionSection('Command options', getCommandOptionRows(definition)), + '**Examples**', + '', + renderExamples(definition.examples), + '', + ] + + return lines.join('\n') +} + +function renderAtAGlanceTable(definitions: ResolvedIntentCommandDefinition[]): string { + return renderTable( + ['Command', 'What it does', 'Input', 'Output'], + definitions.map((definition) => [ + inlineCode(definition.paths.join(' ')), + definition.description, + 
getInputSummary(definition), + getOutputSummary(definition), + ]), + ) +} + +function renderIntentDocsBody({ + definitions, + headingLevel, +}: { + definitions: ResolvedIntentCommandDefinition[] + headingLevel: number +}): string { + const heading = '#'.repeat(headingLevel) + const lines: string[] = [ + `${heading} At a glance`, + '', + 'Intent commands are the fastest path to common one-off tasks from the CLI.', + 'Use `--print-urls` when you want temporary result URLs without downloading locally.', + 'All intent commands also support the global CLI flags `--json`, `--log-level`, `--endpoint`, and `--help`.', + '', + renderAtAGlanceTable(definitions), + '', + '> At least one of `--out` or `--print-urls` is required on every intent command.', + '', + `${heading} Shared flags`, + '', + 'These flags are available across many intent commands, so the per-command sections below focus on differences.', + '', + ...renderOptionSection('Shared file input & output flags', getSharedFileInputOutputRows()), + ...renderOptionSection('Shared no-input output flags', getSharedNoInputOutputRows()), + ...renderOptionSection('Shared processing flags', getSharedProcessingRows()), + ...renderOptionSection('Shared watch flags', getSharedWatchRows()), + ...renderOptionSection('Shared bundling flags', getSharedBundlingRows()), + ] + + for (const definition of definitions) { + lines.push(renderIntentSection(definition, headingLevel)) + } + + return lines.join('\n').trim() +} + +function replaceGeneratedBlock({ + endMarker, + markdown, + readme, + startMarker, +}: { + endMarker: string + markdown: string + readme: string + startMarker: string +}): string { + const startIndex = readme.indexOf(startMarker) + const endIndex = readme.indexOf(endMarker) + if (startIndex === -1 || endIndex === -1 || endIndex < startIndex) { + throw new Error('README intent docs markers are missing or malformed') + } + + const before = readme.slice(0, startIndex + startMarker.length) + const after = 
readme.slice(endIndex) + return `${before}\n\n${markdown}\n\n${after}` +} + +async function main(): Promise { + const definitions = resolveIntentCommandDefinitions() + const readmeUrl = new URL('../../README.md', import.meta.url) + const docsUrl = new URL('../../docs/intent-commands.md', import.meta.url) + const startMarker = '' + const endMarker = '' + + const readme = await readFile(readmeUrl, 'utf8') + const readmeFragment = renderIntentDocsBody({ definitions, headingLevel: 4 }) + const fullDoc = [ + '# Intent Command Reference', + '', + '> Generated by `yarn workspace @transloadit/node sync:intent-docs`. Do not edit by hand.', + '', + renderIntentDocsBody({ definitions, headingLevel: 2 }), + ].join('\n') + + const nextReadme = replaceGeneratedBlock({ + endMarker, + markdown: readmeFragment, + readme, + startMarker, + }) + + await mkdir(dirname(docsUrl.pathname), { recursive: true }) + await writeFile(docsUrl, `${fullDoc}\n`) + await writeFile(readmeUrl, `${nextReadme}\n`) +} + +main().catch((error) => { + if (!(error instanceof Error)) { + throw new Error(`Was thrown a non-error: ${String(error)}`) + } + console.error(error) + process.exit(1) +}) diff --git a/packages/node/src/cli/intentCommandSpecs.ts b/packages/node/src/cli/intentCommandSpecs.ts new file mode 100644 index 00000000..db2cca33 --- /dev/null +++ b/packages/node/src/cli/intentCommandSpecs.ts @@ -0,0 +1,282 @@ +import type { z } from 'zod' + +import type { RobotMetaInput } from '../alphalib/types/robots/_instructions-primitives.ts' +import { + robotAudioWaveformInstructionsSchema, + meta as robotAudioWaveformMeta, +} from '../alphalib/types/robots/audio-waveform.ts' +import { + robotDocumentAutorotateInstructionsSchema, + meta as robotDocumentAutorotateMeta, +} from '../alphalib/types/robots/document-autorotate.ts' +import { + robotDocumentConvertInstructionsSchema, + meta as robotDocumentConvertMeta, +} from '../alphalib/types/robots/document-convert.ts' +import { + 
robotDocumentOptimizeInstructionsSchema, + meta as robotDocumentOptimizeMeta, +} from '../alphalib/types/robots/document-optimize.ts' +import { + robotDocumentThumbsInstructionsSchema, + meta as robotDocumentThumbsMeta, +} from '../alphalib/types/robots/document-thumbs.ts' +import { + robotFileCompressInstructionsSchema, + meta as robotFileCompressMeta, +} from '../alphalib/types/robots/file-compress.ts' +import { + robotFileDecompressInstructionsSchema, + meta as robotFileDecompressMeta, +} from '../alphalib/types/robots/file-decompress.ts' +import { + robotFilePreviewInstructionsSchema, + meta as robotFilePreviewMeta, +} from '../alphalib/types/robots/file-preview.ts' +import { + robotImageBgremoveInstructionsSchema, + meta as robotImageBgremoveMeta, +} from '../alphalib/types/robots/image-bgremove.ts' +import { + robotImageGenerateInstructionsSchema, + meta as robotImageGenerateMeta, +} from '../alphalib/types/robots/image-generate.ts' +import { + robotImageOptimizeInstructionsSchema, + meta as robotImageOptimizeMeta, +} from '../alphalib/types/robots/image-optimize.ts' +import { + robotImageResizeInstructionsSchema, + meta as robotImageResizeMeta, +} from '../alphalib/types/robots/image-resize.ts' +import { + robotTextSpeakInstructionsSchema, + meta as robotTextSpeakMeta, +} from '../alphalib/types/robots/text-speak.ts' +import { + robotVideoThumbsInstructionsSchema, + meta as robotVideoThumbsMeta, +} from '../alphalib/types/robots/video-thumbs.ts' + +export type IntentInputMode = 'local-files' | 'none' +export type IntentOutputMode = 'directory' | 'file' + +interface IntentSchemaDefinition { + meta: RobotMetaInput + schema: z.AnyZodObject +} + +interface IntentBaseDefinition { + outputMode?: IntentOutputMode + paths?: string[] +} + +export interface RobotIntentDefinition extends IntentBaseDefinition, IntentSchemaDefinition { + defaultSingleAssembly?: boolean + inputMode?: IntentInputMode + kind: 'robot' + robot: string +} + +export interface 
TemplateIntentDefinition extends IntentBaseDefinition { + kind: 'template' + paths: string[] + templateId: string +} + +export interface SemanticIntentDefinition extends IntentBaseDefinition { + kind: 'semantic' + paths: string[] + semantic: string +} + +export type IntentDefinition = + | RobotIntentDefinition + | TemplateIntentDefinition + | SemanticIntentDefinition + +const commandPathAliases = new Map([ + ['autorotate', 'auto-rotate'], + ['bgremove', 'remove-background'], +]) + +function defineRobotIntent(definition: RobotIntentDefinition): RobotIntentDefinition { + return definition +} + +function defineTemplateIntent(definition: TemplateIntentDefinition): TemplateIntentDefinition { + return definition +} + +function defineSemanticIntent(definition: SemanticIntentDefinition): SemanticIntentDefinition { + return definition +} + +export function getIntentCatalogKey(definition: IntentDefinition): string { + if (definition.kind === 'robot') { + return definition.robot + } + + if (definition.kind === 'template') { + return definition.templateId + } + + return `${definition.semantic}:${definition.paths.join('/')}` +} + +export function getIntentPaths(definition: IntentDefinition): string[] { + if (definition.paths != null) { + return definition.paths + } + + if (definition.kind !== 'robot') { + throw new Error(`Intent definition ${getIntentCatalogKey(definition)} is missing paths`) + } + + const segments = definition.robot.split('/').filter(Boolean) + const [group, action] = segments + if (group == null || action == null) { + throw new Error(`Could not infer command path from robot "${definition.robot}"`) + } + + return [group, commandPathAliases.get(action) ?? 
action] +} + +export function getIntentResultStepName(definition: IntentDefinition): string | null { + if (definition.kind !== 'robot') { + return null + } + + const paths = getIntentPaths(definition) + const action = paths[paths.length - 1] + if (action == null) { + throw new Error(`Intent definition ${definition.robot} has no action path`) + } + + return action.replaceAll('-', '_') +} + +export function findIntentDefinitionByPaths( + paths: readonly string[], +): IntentDefinition | undefined { + return intentCatalog.find((definition) => { + const definitionPaths = getIntentPaths(definition) + return ( + definitionPaths.length === paths.length && + definitionPaths.every((part, index) => part === paths[index]) + ) + }) +} + +export const intentCatalog = [ + defineRobotIntent({ + kind: 'robot', + robot: '/image/generate', + meta: robotImageGenerateMeta, + schema: robotImageGenerateInstructionsSchema, + }), + defineRobotIntent({ + kind: 'robot', + robot: '/file/preview', + paths: ['preview', 'generate'], + meta: robotFilePreviewMeta, + schema: robotFilePreviewInstructionsSchema, + }), + defineRobotIntent({ + kind: 'robot', + robot: '/image/bgremove', + meta: robotImageBgremoveMeta, + schema: robotImageBgremoveInstructionsSchema, + }), + defineRobotIntent({ + kind: 'robot', + robot: '/image/optimize', + meta: robotImageOptimizeMeta, + schema: robotImageOptimizeInstructionsSchema, + }), + defineRobotIntent({ + kind: 'robot', + robot: '/image/resize', + meta: robotImageResizeMeta, + schema: robotImageResizeInstructionsSchema, + }), + defineRobotIntent({ + kind: 'robot', + robot: '/document/convert', + meta: robotDocumentConvertMeta, + schema: robotDocumentConvertInstructionsSchema, + }), + defineRobotIntent({ + kind: 'robot', + robot: '/document/optimize', + meta: robotDocumentOptimizeMeta, + schema: robotDocumentOptimizeInstructionsSchema, + }), + defineRobotIntent({ + kind: 'robot', + robot: '/document/autorotate', + meta: robotDocumentAutorotateMeta, + schema: 
robotDocumentAutorotateInstructionsSchema, + }), + defineRobotIntent({ + kind: 'robot', + robot: '/document/thumbs', + outputMode: 'directory', + meta: robotDocumentThumbsMeta, + schema: robotDocumentThumbsInstructionsSchema, + }), + defineRobotIntent({ + kind: 'robot', + robot: '/audio/waveform', + meta: robotAudioWaveformMeta, + schema: robotAudioWaveformInstructionsSchema, + }), + defineRobotIntent({ + kind: 'robot', + robot: '/text/speak', + meta: robotTextSpeakMeta, + schema: robotTextSpeakInstructionsSchema, + }), + defineRobotIntent({ + kind: 'robot', + robot: '/video/thumbs', + outputMode: 'directory', + meta: robotVideoThumbsMeta, + schema: robotVideoThumbsInstructionsSchema, + }), + defineTemplateIntent({ + kind: 'template', + templateId: 'builtin/encode-hls-video@latest', + paths: ['video', 'encode-hls'], + outputMode: 'directory', + }), + defineSemanticIntent({ + kind: 'semantic', + semantic: 'image-describe', + paths: ['image', 'describe'], + }), + defineSemanticIntent({ + kind: 'semantic', + semantic: 'markdown-pdf', + paths: ['markdown', 'pdf'], + }), + defineSemanticIntent({ + kind: 'semantic', + semantic: 'markdown-docx', + paths: ['markdown', 'docx'], + }), + defineRobotIntent({ + kind: 'robot', + robot: '/file/compress', + defaultSingleAssembly: true, + meta: robotFileCompressMeta, + schema: robotFileCompressInstructionsSchema, + }), + defineRobotIntent({ + kind: 'robot', + robot: '/file/decompress', + outputMode: 'directory', + meta: robotFileDecompressMeta, + schema: robotFileDecompressInstructionsSchema, + }), +] satisfies IntentDefinition[] diff --git a/packages/node/src/cli/intentCommands.ts b/packages/node/src/cli/intentCommands.ts new file mode 100644 index 00000000..0fefffc7 --- /dev/null +++ b/packages/node/src/cli/intentCommands.ts @@ -0,0 +1,525 @@ +import type { CommandClass } from 'clipanion' +import { Command } from 'clipanion' +import type { ZodObject, ZodRawShape, ZodTypeAny } from 'zod' + +import type { RobotMetaInput } from 
'../alphalib/types/robots/_instructions-primitives.ts' +import type { + IntentDefinition, + IntentInputMode, + IntentOutputMode, + RobotIntentDefinition, + SemanticIntentDefinition, +} from './intentCommandSpecs.ts' +import { getIntentPaths, getIntentResultStepName, intentCatalog } from './intentCommandSpecs.ts' +import type { IntentFieldKind, IntentFieldSpec } from './intentFields.ts' +import { + createIntentOption, + inferIntentExampleValue, + inferIntentFieldKind, + unwrapIntentSchema, +} from './intentFields.ts' +import type { IntentInputPolicy } from './intentInputPolicy.ts' +import type { + IntentCommandDefinition, + IntentFileCommandDefinition, + IntentNoInputCommandDefinition, + IntentSingleStepExecutionDefinition, +} from './intentRuntime.ts' +import { + GeneratedBundledFileIntentCommand, + GeneratedNoInputIntentCommand, + GeneratedStandardFileIntentCommand, + GeneratedWatchableFileIntentCommand, + getIntentOptionDefinitions, +} from './intentRuntime.ts' +import { getSemanticIntentDescriptor } from './semanticIntents/index.ts' + +interface GeneratedSchemaField extends IntentFieldSpec { + description?: string + exampleValue: string + optionFlags: string + propertyName: string + required: boolean +} + +interface ResolvedIntentLocalFilesInput { + defaultSingleAssembly?: boolean + inputPolicy: IntentInputPolicy + kind: 'local-files' +} + +interface ResolvedIntentNoneInput { + kind: 'none' +} + +type ResolvedIntentInput = ResolvedIntentLocalFilesInput | ResolvedIntentNoneInput + +type IntentBaseClass = + | typeof GeneratedBundledFileIntentCommand + | typeof GeneratedNoInputIntentCommand + | typeof GeneratedStandardFileIntentCommand + | typeof GeneratedWatchableFileIntentCommand + +type BuiltIntentCommandDefinition = IntentCommandDefinition & { + intentDefinition: IntentFileCommandDefinition | IntentNoInputCommandDefinition +} + +export type ResolvedIntentCommandDefinition = BuiltIntentCommandDefinition & { + catalogDefinition: IntentDefinition +} + +const 
hiddenFieldNames = new Set([ + 'ffmpeg_stack', + 'force_accept', + 'ignore_errors', + 'imagemagick_stack', + 'output_meta', + 'queue', + 'result', + 'robot', + 'stack', + 'use', +]) + +function toCamelCase(value: string): string { + return value.replace(/_([a-z])/g, (_match, letter: string) => letter.toUpperCase()) +} + +function toKebabCase(value: string): string { + return value.replaceAll('_', '-') +} + +function toPascalCase(parts: string[]): string { + return parts + .flatMap((part) => part.split('-')) + .map((part) => `${part[0]?.toUpperCase() ?? ''}${part.slice(1)}`) + .join('') +} + +function stripTrailingPunctuation(value: string): string { + return value.replace(/[.:]+$/, '').trim() +} + +function getTypicalInputFile(meta: RobotMetaInput): string { + switch (meta.typical_file_type) { + case 'audio file': + return 'input.mp3' + case 'document': + return 'input.pdf' + case 'image': + return 'input.png' + case 'video': + return 'input.mp4' + default: + return 'input.file' + } +} + +function getDefaultOutputPath(paths: string[], outputMode: IntentOutputMode): string { + if (outputMode === 'directory') { + return 'output/' + } + + const [group] = paths + if (group === 'audio') return 'output.png' + if (group === 'document') return 'output.pdf' + if (group === 'image') return 'output.png' + if (group === 'text') return 'output.mp3' + return 'output.file' +} + +function inferOutputPath( + paths: string[], + outputMode: IntentOutputMode, + fieldSpecs: readonly GeneratedSchemaField[], +): string { + if (outputMode === 'directory') { + return 'output/' + } + + const formatExample = fieldSpecs + .map((fieldSpec) => + fieldSpec.required && fieldSpec.name === 'format' ? 
fieldSpec.exampleValue : null, + ) + .find((value) => value != null) + + if (fieldSpecs.some((fieldSpec) => fieldSpec.name === 'format') && formatExample != null) { + if (fieldSpecs.some((fieldSpec) => fieldSpec.name === 'relative_pathname')) { + return `archive.${formatExample}` + } + } + + if (formatExample != null && /^[-\w]+$/.test(formatExample)) { + return `output.${formatExample}` + } + + return getDefaultOutputPath(paths, outputMode) +} + +function inferInputModeFromShape(shape: Record): IntentInputMode { + if ('prompt' in shape) { + return unwrapIntentSchema(shape.prompt).required ? 'none' : 'local-files' + } + + return 'local-files' +} + +function inferIntentInput( + definition: RobotIntentDefinition, + shape: Record, +): ResolvedIntentInput { + const inputMode = definition.inputMode ?? inferInputModeFromShape(shape) + if (inputMode === 'none') { + return { kind: 'none' } + } + + const promptIsOptional = 'prompt' in shape && !unwrapIntentSchema(shape.prompt).required + const inputPolicy = promptIsOptional + ? 
({ + kind: 'optional', + field: 'prompt', + attachUseWhenInputsProvided: true, + } satisfies IntentInputPolicy) + : ({ kind: 'required' } satisfies IntentInputPolicy) + + if (definition.defaultSingleAssembly) { + return { + kind: 'local-files', + defaultSingleAssembly: true, + inputPolicy, + } + } + + return { + kind: 'local-files', + inputPolicy, + } +} + +function inferFixedValues( + definition: RobotIntentDefinition, + input: ResolvedIntentInput, + inputMode: IntentInputMode, +): Record { + if (definition.defaultSingleAssembly) { + return { + robot: definition.robot, + result: true, + use: { + steps: [':original'], + bundle_steps: true, + }, + } + } + + if (inputMode === 'none') { + return { + robot: definition.robot, + result: true, + } + } + + if (input.kind === 'local-files' && input.inputPolicy.kind === 'required') { + return { + robot: definition.robot, + result: true, + use: ':original', + } + } + + return { + robot: definition.robot, + result: true, + } +} + +function collectSchemaFields( + schemaShape: Record, + fixedValues: Record, + input: ResolvedIntentInput, +): GeneratedSchemaField[] { + return Object.entries(schemaShape) + .filter(([key]) => !hiddenFieldNames.has(key) && !Object.hasOwn(fixedValues, key)) + .flatMap(([key, fieldSchema]) => { + const { required: schemaRequired, schema: unwrappedSchema } = unwrapIntentSchema(fieldSchema) + + let kind: IntentFieldKind + try { + kind = inferIntentFieldKind(unwrappedSchema) + } catch { + return [] + } + + return [ + { + name: key, + propertyName: toCamelCase(key), + optionFlags: `--${toKebabCase(key)}`, + required: (input.kind === 'none' && key === 'prompt') || schemaRequired, + description: fieldSchema.description, + exampleValue: inferIntentExampleValue({ + kind, + name: key, + schema: unwrappedSchema as ZodTypeAny, + }), + kind, + }, + ] + }) +} + +function inferExamples( + spec: BuiltIntentCommandDefinition, + definition?: RobotIntentDefinition, +): Array<[string, string]> { + if (definition == null) 
{ + if (spec.intentDefinition.execution.kind === 'dynamic-step') { + return spec.examples + } + + return [ + ['Run the command', `transloadit ${spec.paths.join(' ')} --input input.mp4 --out output/`], + ] + } + + const parts = ['transloadit', ...spec.paths] + const schemaShape = (definition.schema as ZodObject).shape as Record< + string, + ZodTypeAny + > + const inputMode = definition.inputMode ?? inferInputModeFromShape(schemaShape) + const fieldSpecs = + spec.intentDefinition.execution.kind === 'single-step' + ? (spec.intentDefinition.execution.fields as readonly GeneratedSchemaField[]) + : [] + + if (inputMode === 'local-files') { + parts.push('--input', getTypicalInputFile(definition.meta)) + } + + if (inputMode === 'none') { + const promptField = fieldSpecs.find((fieldSpec) => fieldSpec.name === 'prompt') + parts.push('--prompt', promptField?.exampleValue ?? JSON.stringify('A red bicycle in a studio')) + } + + for (const fieldSpec of fieldSpecs) { + if (!fieldSpec.required) continue + if (fieldSpec.name === 'prompt' && inputMode === 'none') continue + + parts.push(fieldSpec.optionFlags, fieldSpec.exampleValue) + } + + const outputMode = spec.intentDefinition.outputMode ?? 'file' + parts.push('--out', inferOutputPath(spec.paths, outputMode, fieldSpecs)) + + return [['Run the command', parts.join(' ')]] +} + +function resolveRobotIntent(definition: RobotIntentDefinition): BuiltIntentCommandDefinition { + const paths = getIntentPaths(definition) + const className = `${toPascalCase(paths)}Command` + const commandLabel = paths.join(' ') + const schema = definition.schema as ZodObject + const schemaShape = schema.shape as Record + const inputMode = definition.inputMode ?? inferInputModeFromShape(schemaShape) + const input = inferIntentInput(definition, schemaShape) + const fixedValues = inferFixedValues(definition, input, inputMode) + const fieldSpecs = collectSchemaFields(schemaShape, fixedValues, input) + const outputMode = definition.outputMode ?? 
'file' + const execution: IntentSingleStepExecutionDefinition = { + kind: 'single-step', + schema, + fields: fieldSpecs, + fixedValues, + resultStepName: + getIntentResultStepName(definition) ?? + (() => { + throw new Error(`Could not infer result step name for "${definition.robot}"`) + })(), + } + + const spec: BuiltIntentCommandDefinition = { + className, + description: stripTrailingPunctuation(definition.meta.title), + details: getIntentDetails({ + defaultSingleAssembly: definition.defaultSingleAssembly === true, + inputMode, + outputMode, + robot: definition.robot, + }), + examples: [], + paths, + runnerKind: + input.kind === 'none' ? 'no-input' : input.defaultSingleAssembly ? 'bundled' : 'standard', + intentDefinition: + input.kind === 'none' + ? { + execution, + outputDescription: 'Write the result to this path', + outputMode, + } + : { + commandLabel, + execution, + inputPolicy: input.inputPolicy, + outputDescription: + outputMode === 'directory' + ? 'Write the results to this directory' + : 'Write the result to this path or directory', + outputMode, + }, + } + + return { + ...spec, + examples: inferExamples(spec, definition), + } +} + +function getIntentDetails({ + defaultSingleAssembly, + inputMode, + outputMode, + robot, +}: { + defaultSingleAssembly: boolean + inputMode: IntentInputMode + outputMode: 'directory' | 'file' + robot: string +}): string { + if (inputMode === 'none') { + return `Runs \`${robot}\` and writes the result to \`--out\`.` + } + + if (defaultSingleAssembly) { + return `Runs \`${robot}\` for the provided inputs and writes the result to \`--out\`.` + } + + if (outputMode === 'directory') { + return `Runs \`${robot}\` on each input file and writes the results to \`--out\`.` + } + + return `Runs \`${robot}\` on each input file and writes the result to \`--out\`.` +} + +function resolveSemanticIntent(definition: SemanticIntentDefinition): BuiltIntentCommandDefinition { + const paths = getIntentPaths(definition) + const descriptor = 
getSemanticIntentDescriptor(definition.semantic) + + return { + className: `${toPascalCase(paths)}Command`, + description: descriptor.presentation.description, + details: descriptor.presentation.details, + examples: [...descriptor.presentation.examples], + paths, + runnerKind: descriptor.runnerKind, + intentDefinition: { + commandLabel: paths.join(' '), + execution: descriptor.execution, + inputPolicy: descriptor.inputPolicy, + outputDescription: descriptor.outputDescription, + }, + } +} + +function resolveTemplateIntent( + definition: IntentDefinition & { kind: 'template' }, +): BuiltIntentCommandDefinition { + const outputMode = definition.outputMode ?? 'file' + const paths = getIntentPaths(definition) + const spec: BuiltIntentCommandDefinition = { + className: `${toPascalCase(paths)}Command`, + description: `Run ${stripTrailingPunctuation(definition.templateId)}`, + details: `Runs the \`${definition.templateId}\` template and writes the outputs to \`--out\`.`, + examples: [], + paths, + runnerKind: 'standard', + intentDefinition: { + commandLabel: paths.join(' '), + execution: { + kind: 'template', + templateId: definition.templateId, + }, + inputPolicy: { kind: 'required' }, + outputDescription: + outputMode === 'directory' + ? 
'Write the results to this directory' + : 'Write the result to this path or directory', + outputMode, + }, + } + + return { + ...spec, + examples: inferExamples(spec), + } +} + +function resolveIntent(definition: IntentDefinition): BuiltIntentCommandDefinition { + if (definition.kind === 'robot') { + return resolveRobotIntent(definition) + } + + if (definition.kind === 'semantic') { + return resolveSemanticIntent(definition) + } + + return resolveTemplateIntent(definition) +} + +export function resolveIntentCommandDefinitions(): ResolvedIntentCommandDefinition[] { + return intentCatalog.map((definition) => ({ + ...resolveIntent(definition), + catalogDefinition: definition, + })) +} + +function getBaseClass(spec: BuiltIntentCommandDefinition): IntentBaseClass { + if (spec.runnerKind === 'no-input') { + return GeneratedNoInputIntentCommand + } + + if (spec.runnerKind === 'bundled') { + return GeneratedBundledFileIntentCommand + } + + if (spec.runnerKind === 'watchable') { + return GeneratedWatchableFileIntentCommand + } + + return GeneratedStandardFileIntentCommand +} + +function createIntentCommandClass(spec: BuiltIntentCommandDefinition): CommandClass { + const BaseClass = getBaseClass(spec) + + class RuntimeIntentCommand extends BaseClass {} + + Object.defineProperty(RuntimeIntentCommand, 'name', { + value: spec.className, + }) + + Object.assign(RuntimeIntentCommand, { + paths: [spec.paths], + intentDefinition: spec.intentDefinition, + usage: Command.Usage({ + category: 'Intent Commands', + description: spec.description, + details: spec.details, + examples: spec.examples, + }), + }) + + for (const field of getIntentOptionDefinitions(spec.intentDefinition)) { + Object.defineProperty(RuntimeIntentCommand.prototype, field.propertyName, { + configurable: true, + enumerable: true, + writable: true, + value: createIntentOption(field), + }) + } + + return RuntimeIntentCommand as unknown as CommandClass +} + +export const intentCommands = 
resolveIntentCommandDefinitions().map(createIntentCommandClass) diff --git a/packages/node/src/cli/intentFields.ts b/packages/node/src/cli/intentFields.ts new file mode 100644 index 00000000..df5c2f8e --- /dev/null +++ b/packages/node/src/cli/intentFields.ts @@ -0,0 +1,403 @@ +import { Option } from 'clipanion' +import * as t from 'typanion' +import type { z } from 'zod' +import { + ZodArray, + ZodBoolean, + ZodDefault, + ZodEffects, + ZodEnum, + ZodLiteral, + ZodNullable, + ZodNumber, + ZodObject, + ZodOptional, + ZodString, + ZodUnion, +} from 'zod' + +export type IntentFieldKind = 'auto' | 'boolean' | 'json' | 'number' | 'string' | 'string-array' + +export interface IntentFieldSpec { + kind: IntentFieldKind + name: string +} + +export interface IntentOptionLike extends IntentFieldSpec { + description?: string + optionFlags: string + required?: boolean +} + +export function unwrapIntentSchema(input: unknown): { required: boolean; schema: unknown } { + let schema = input + let required = true + + while (true) { + if (schema instanceof ZodEffects) { + schema = schema._def.schema + continue + } + + if (schema instanceof ZodOptional) { + required = false + schema = schema.unwrap() + continue + } + + if (schema instanceof ZodDefault) { + required = false + schema = schema.removeDefault() + continue + } + + if (schema instanceof ZodNullable) { + required = false + schema = schema.unwrap() + continue + } + + return { required, schema } + } +} + +export function inferIntentFieldKind(schema: unknown): IntentFieldKind { + const unwrappedSchema = unwrapIntentSchema(schema).schema + + if (unwrappedSchema instanceof ZodString || unwrappedSchema instanceof ZodEnum) { + return 'string' + } + + if (unwrappedSchema instanceof ZodNumber) { + return 'number' + } + + if (unwrappedSchema instanceof ZodBoolean) { + return 'boolean' + } + + if (unwrappedSchema instanceof ZodLiteral) { + if (typeof unwrappedSchema.value === 'number') return 'number' + if (typeof unwrappedSchema.value 
=== 'boolean') return 'boolean' + return 'string' + } + + if (unwrappedSchema instanceof ZodArray) { + const elementKind = inferIntentFieldKind(unwrappedSchema.element) + if (elementKind === 'string') { + return 'string-array' + } + + return 'json' + } + + if (unwrappedSchema instanceof ZodObject) { + return 'json' + } + + if (unwrappedSchema instanceof ZodUnion) { + const optionKinds = Array.from( + new Set(unwrappedSchema._def.options.map((option: unknown) => inferIntentFieldKind(option))), + ) as IntentFieldKind[] + if ( + optionKinds.length === 2 && + optionKinds.includes('string') && + optionKinds.includes('string-array') + ) { + return 'string-array' + } + if (optionKinds.length === 1) { + const [kind] = optionKinds + if (kind != null) return kind + } + return 'auto' + } + + throw new Error('Unsupported schema type') +} + +export function createIntentOption(fieldDefinition: IntentOptionLike): unknown { + const { description, kind, optionFlags, required } = fieldDefinition + + if (kind === 'boolean') { + return Option.Boolean(optionFlags, { + description, + required, + }) + } + + if (kind === 'number') { + return Option.String(optionFlags, { + description, + required, + validator: t.isNumber(), + }) + } + + if (kind === 'string-array') { + return Option.Array(optionFlags, { + description, + required, + }) + } + + return Option.String(optionFlags, { + description, + required, + }) +} + +function inferSchemaExampleValue(schema: unknown): string | null { + const unwrappedSchema = unwrapIntentSchema(schema).schema + + if (unwrappedSchema instanceof ZodLiteral) { + return String(unwrappedSchema.value) + } + + if (unwrappedSchema instanceof ZodEnum) { + return unwrappedSchema.options[0] ?? 
null + } + + if (unwrappedSchema instanceof ZodUnion) { + for (const option of unwrappedSchema._def.options) { + const exampleValue = inferSchemaExampleValue(option) + if (exampleValue != null) { + return exampleValue + } + } + } + + return null +} + +export function parseStringArrayValue(raw: unknown): string[] { + const addNormalizedValues = (source: string[], value: string): void => { + source.push( + ...value + .split(',') + .map((part) => part.trim()) + .filter(Boolean), + ) + } + + const normalizeJsonArray = (value: string): string[] | null => { + const trimmed = value.trim() + if (!trimmed.startsWith('[')) { + return null + } + + let parsedJson: unknown + try { + parsedJson = JSON.parse(trimmed) + } catch { + throw new Error(`Expected valid JSON but received "${value}"`) + } + + if (!Array.isArray(parsedJson) || !parsedJson.every((item) => typeof item === 'string')) { + throw new Error(`Expected an array of strings but received "${value}"`) + } + + return parsedJson + } + + const values = Array.isArray(raw) ? raw : [raw] + const normalizedValues: string[] = [] + for (const value of values) { + if (typeof value !== 'string') { + normalizedValues.push(String(value)) + continue + } + + const parsedJson = normalizeJsonArray(value) + if (parsedJson != null) { + normalizedValues.push(...parsedJson) + continue + } + + addNormalizedValues(normalizedValues, value) + } + + return normalizedValues +} + +function pickPreferredExampleValue(name: string, candidates: readonly string[]): string | null { + if (candidates.length === 0) { + return null + } + + if (name === 'format') { + const preferredFormats = ['pdf', 'zip', 'jpg', 'png', 'mp3'] + for (const preferredFormat of preferredFormats) { + if (candidates.includes(preferredFormat)) { + return preferredFormat + } + } + } + + return candidates[0] ?? 
null +} + +export function inferIntentExampleValue({ + kind, + name, + schema, +}: { + kind: IntentFieldKind + name: string + schema?: z.ZodTypeAny +}): string { + const preferredExamples = { + prompt: JSON.stringify('A red bicycle in a studio'), + provider: 'aws', + target_language: 'en-US', + voice: 'female-1', + } as const satisfies Record + const preferredExample = (preferredExamples as Record)[name] + if (preferredExample != null) { + return preferredExample + } + + const schemaExample = + schema instanceof ZodEnum + ? pickPreferredExampleValue(name, schema.options) + : schema instanceof ZodUnion + ? pickPreferredExampleValue( + name, + schema._def.options + .map((option: unknown) => inferSchemaExampleValue(option)) + .filter((value: string | null): value is string => value != null), + ) + : schema == null + ? null + : inferSchemaExampleValue(schema) + if (schemaExample != null) { + return schemaExample + } + + if (kind === 'boolean') { + return 'true' + } + + if (kind === 'number') { + return '1' + } + + return 'value' +} + +export function coerceIntentFieldValue( + kind: IntentFieldKind, + raw: unknown, + fieldSchema?: z.ZodTypeAny, +): unknown { + if (kind === 'number' && typeof raw === 'number') { + return raw + } + + if (kind === 'boolean' && typeof raw === 'boolean') { + return raw + } + + if (kind === 'auto') { + if (fieldSchema == null) { + return raw + } + + const candidates: unknown[] = [] + + if (typeof raw !== 'string') { + candidates.push(raw) + } + + const trimmed = typeof raw === 'string' ? 
raw.trim() : '' + + if (typeof raw === 'string' && (trimmed.startsWith('{') || trimmed.startsWith('['))) { + try { + candidates.push(JSON.parse(trimmed)) + } catch {} + } + + candidates.push(raw) + + if ( + typeof raw === 'string' && + trimmed !== '' && + !trimmed.startsWith('{') && + !trimmed.startsWith('[') + ) { + try { + candidates.push(JSON.parse(trimmed)) + } catch {} + } + + if (raw === 'true' || raw === 'false') { + candidates.push(raw === 'true') + } + + const numericValue = Number(raw) + if ((typeof raw === 'number' || trimmed !== '') && !Number.isNaN(numericValue)) { + candidates.push(numericValue) + } + + for (const candidate of candidates) { + const parsed = fieldSchema.safeParse(candidate) + if (parsed.success) { + return parsed.data as boolean | number | string + } + } + + return raw + } + + if (kind === 'number') { + if (typeof raw !== 'string') { + throw new Error(`Expected a number but received "${String(raw)}"`) + } + if (raw.trim() === '') { + throw new Error(`Expected a number but received "${raw}"`) + } + const value = Number(raw) + if (Number.isNaN(value)) { + throw new Error(`Expected a number but received "${raw}"`) + } + return value + } + + if (kind === 'json') { + if (typeof raw !== 'string') { + return raw + } + let parsedJson: unknown + try { + parsedJson = JSON.parse(raw) + } catch { + throw new Error(`Expected valid JSON but received "${raw}"`) + } + + if (fieldSchema == null) { + return parsedJson + } + + const parsed = fieldSchema.safeParse(parsedJson) + if (!parsed.success) { + throw new Error(parsed.error.message) + } + + return parsed.data + } + + if (kind === 'boolean') { + if (typeof raw !== 'string') { + throw new Error(`Expected "true" or "false" but received "${String(raw)}"`) + } + if (raw === 'true') return true + if (raw === 'false') return false + throw new Error(`Expected "true" or "false" but received "${raw}"`) + } + + if (kind === 'string-array') { + return parseStringArrayValue(raw) + } + + return raw +} diff --git 
a/packages/node/src/cli/intentInputPolicy.ts b/packages/node/src/cli/intentInputPolicy.ts new file mode 100644 index 00000000..c72dc576 --- /dev/null +++ b/packages/node/src/cli/intentInputPolicy.ts @@ -0,0 +1,11 @@ +export interface RequiredIntentInputPolicy { + kind: 'required' +} + +export interface OptionalIntentInputPolicy { + attachUseWhenInputsProvided: boolean + field: string + kind: 'optional' +} + +export type IntentInputPolicy = OptionalIntentInputPolicy | RequiredIntentInputPolicy diff --git a/packages/node/src/cli/intentRuntime.ts b/packages/node/src/cli/intentRuntime.ts new file mode 100644 index 00000000..1c07a22c --- /dev/null +++ b/packages/node/src/cli/intentRuntime.ts @@ -0,0 +1,734 @@ +import { statSync } from 'node:fs' +import { basename } from 'node:path' +import { Option } from 'clipanion' +import type { z } from 'zod' + +import { prepareInputFiles } from '../inputFiles.ts' +import type { AssembliesCreateOptions } from './commands/assemblies.ts' +import * as assembliesCommands from './commands/assemblies.ts' +import { AuthenticatedCommand } from './commands/BaseCommand.ts' +import type { SharedCliOptionDocumentation } from './fileProcessingOptions.ts' +import { + concurrencyOption, + countProvidedInputs, + deleteAfterProcessingOption, + inputPathsOption, + printUrlsOption, + recursiveOption, + reprocessStaleOption, + singleAssemblyOption, + validateSharedFileProcessingOptions, + watchOption, +} from './fileProcessingOptions.ts' +import type { IntentFieldSpec } from './intentFields.ts' +import { coerceIntentFieldValue } from './intentFields.ts' +import type { IntentInputPolicy } from './intentInputPolicy.ts' +import { printResultUrls } from './resultUrls.ts' +import { getSemanticIntentDescriptor } from './semanticIntents/index.ts' + +export interface PreparedIntentInputs { + cleanup: Array<() => Promise> + hasTransientInputs: boolean + inputs: string[] +} + +export interface IntentSingleStepExecutionDefinition { + fields: readonly 
IntentOptionDefinition[] + fixedValues: Record + kind: 'single-step' + resultStepName: string + schema: z.AnyZodObject +} + +export interface IntentDynamicStepExecutionDefinition { + fields: readonly IntentOptionDefinition[] + handler: string + kind: 'dynamic-step' + resultStepName: string +} + +export interface IntentTemplateExecutionDefinition { + kind: 'template' + templateId: string +} + +export type IntentFileExecutionDefinition = + | IntentDynamicStepExecutionDefinition + | IntentSingleStepExecutionDefinition + | IntentTemplateExecutionDefinition + +export interface IntentFileCommandDefinition { + commandLabel: string + execution: IntentFileExecutionDefinition + inputPolicy: IntentInputPolicy + outputDescription: string + outputMode?: 'directory' | 'file' +} + +export interface IntentNoInputCommandDefinition { + execution: IntentSingleStepExecutionDefinition + outputDescription: string + outputMode?: 'directory' | 'file' +} + +export type IntentRunnerKind = 'bundled' | 'no-input' | 'standard' | 'watchable' + +export interface IntentCommandDefinition { + className: string + description: string + details: string + examples: Array<[string, string]> + intentDefinition: IntentFileCommandDefinition | IntentNoInputCommandDefinition + paths: string[] + runnerKind: IntentRunnerKind +} + +export interface IntentOptionDefinition extends IntentFieldSpec { + description?: string + exampleValue?: unknown + optionFlags: string + propertyName: string + required?: boolean +} + +const inputBase64OptionDocumentation = { + flags: '--input-base64', + type: 'base64 | data URL', + required: 'no', + example: 'data:text/plain;base64,SGVsbG8=', + description: 'Provide base64-encoded input content directly', +} as const satisfies SharedCliOptionDocumentation + +export function getInputBase64OptionDocumentation(): SharedCliOptionDocumentation { + return inputBase64OptionDocumentation +} + +function inputBase64Option(): string[] { + return 
Option.Array(inputBase64OptionDocumentation.flags, { + description: inputBase64OptionDocumentation.description, + }) as unknown as string[] +} + +function isHttpUrl(value: string): boolean { + try { + const url = new URL(value) + return url.protocol === 'http:' || url.protocol === 'https:' + } catch { + return false + } +} + +function parseBase64DataUrl( + value: string, +): { mediaType: string | null; payload: string; trimmed: string } | null { + const trimmed = value.trim() + const marker = ';base64,' + const markerIndex = trimmed.indexOf(marker) + if (!trimmed.startsWith('data:') || markerIndex === -1) { + return null + } + + return { + trimmed, + mediaType: trimmed.slice('data:'.length, markerIndex).split(';')[0]?.toLowerCase() ?? null, + payload: trimmed.slice(markerIndex + marker.length), + } +} + +function normalizeBase64Value(value: string): string { + const parsed = parseBase64DataUrl(value) + return parsed?.payload ?? value.trim() +} + +function inferFilenameFromBase64Value(value: string, index: number): string { + const parsed = parseBase64DataUrl(value) + if (parsed == null) { + return `input-base64-${index}.bin` + } + + const extensionByMediaType = { + 'text/plain': 'txt', + 'text/markdown': 'md', + 'application/pdf': 'pdf', + 'image/png': 'png', + 'image/jpeg': 'jpg', + 'image/webp': 'webp', + 'application/json': 'json', + } as const satisfies Record + const extension = + (extensionByMediaType as Record)[parsed.mediaType ?? ''] ?? 
'bin' + + return `input-base64-${index}.${extension}` +} + +export async function prepareIntentInputs({ + inputBase64Values, + inputValues, +}: { + inputBase64Values: string[] + inputValues: string[] +}): Promise { + const preparedOrder: string[] = [] + const syntheticInputs: Array< + | { + base64: string + field: string + filename: string + kind: 'base64' + } + | { + field: string + kind: 'url' + url: string + } + > = [] + + for (const value of inputValues) { + if (!isHttpUrl(value)) { + preparedOrder.push(value) + continue + } + + const field = `input_url_${syntheticInputs.length + 1}` + syntheticInputs.push({ + kind: 'url', + field, + url: value, + }) + preparedOrder.push(field) + } + + for (const [index, value] of inputBase64Values.entries()) { + const field = `input_base64_${index + 1}` + const filename = inferFilenameFromBase64Value(value, index + 1) + syntheticInputs.push({ + kind: 'base64', + field, + filename, + base64: normalizeBase64Value(value), + }) + preparedOrder.push(field) + } + + if (syntheticInputs.length === 0) { + return { + cleanup: [], + hasTransientInputs: false, + inputs: preparedOrder, + } + } + + const prepared = await prepareInputFiles({ + inputFiles: syntheticInputs.map((input) => { + if (input.kind === 'url') { + return { + kind: 'url' as const, + field: input.field, + url: input.url, + filename: basename(new URL(input.url).pathname) || undefined, + } + } + + return { + kind: 'base64' as const, + field: input.field, + base64: input.base64, + filename: input.filename, + } + }), + base64Strategy: 'tempfile', + allowPrivateUrls: false, + urlStrategy: 'download', + }) + + const inputs = preparedOrder.map((value) => prepared.files[value] ?? 
value) + + return { + cleanup: prepared.cleanup, + hasTransientInputs: true, + inputs, + } +} + +function parseIntentStep({ + fields, + fixedValues, + rawValues, + schema, +}: { + fields: readonly IntentFieldSpec[] + fixedValues: Record + rawValues: Record + schema: TSchema +}): z.input { + const input: Record = { ...fixedValues } + + for (const fieldSpec of fields) { + const rawValue = rawValues[fieldSpec.name] + if (rawValue == null) continue + const fieldSchema = schema.shape[fieldSpec.name] + input[fieldSpec.name] = coerceIntentFieldValue(fieldSpec.kind, rawValue, fieldSchema) + } + + const parsed = schema.parse(input) as Record + const normalizedInput: Record = { ...fixedValues } + + for (const fieldSpec of fields) { + const rawValue = rawValues[fieldSpec.name] + if (rawValue == null) continue + normalizedInput[fieldSpec.name] = parsed[fieldSpec.name] + } + + return normalizedInput as z.input +} + +function resolveSingleStepFixedValues( + execution: IntentSingleStepExecutionDefinition, + inputPolicy: IntentInputPolicy, + hasInputs: boolean, +): Record { + if (!hasInputs) { + return execution.fixedValues + } + + if (inputPolicy.kind !== 'optional' || inputPolicy.attachUseWhenInputsProvided !== true) { + return execution.fixedValues + } + + return { + ...execution.fixedValues, + use: ':original', + } +} + +function createSingleStep( + execution: IntentSingleStepExecutionDefinition, + inputPolicy: IntentInputPolicy, + rawValues: Record, + hasInputs: boolean, +): z.input { + return parseIntentStep({ + schema: execution.schema, + fixedValues: resolveSingleStepFixedValues(execution, inputPolicy, hasInputs), + fields: execution.fields, + rawValues, + }) +} + +function createDynamicIntentStep( + execution: IntentDynamicStepExecutionDefinition, + rawValues: Record, +): Record { + return getSemanticIntentDescriptor(execution.handler).createStep(rawValues) +} + +function requiresLocalInput( + inputPolicy: IntentInputPolicy, + rawValues: Record, +): boolean { + if 
(inputPolicy.kind === 'required') { + return true + } + + return rawValues[inputPolicy.field] == null +} + +async function executeIntentCommand({ + client, + definition, + output, + outputPath, + printUrls, + rawValues, + createOptions, +}: { + client: AuthenticatedCommand['client'] + createOptions: Omit + definition: IntentFileCommandDefinition | IntentNoInputCommandDefinition + output: AuthenticatedCommand['output'] + outputPath?: string + printUrls: boolean + rawValues: Record +}): Promise { + const inputPolicy: IntentInputPolicy = + 'inputPolicy' in definition ? definition.inputPolicy : { kind: 'required' } + const executionOptions = + definition.execution.kind === 'template' + ? { + template: definition.execution.templateId, + } + : { + stepsData: { + [definition.execution.resultStepName]: + definition.execution.kind === 'single-step' + ? createSingleStep( + definition.execution, + inputPolicy, + rawValues, + createOptions.inputs.length > 0, + ) + : createDynamicIntentStep(definition.execution, rawValues), + } as AssembliesCreateOptions['stepsData'], + } + + const { hasFailures, resultUrls } = await assembliesCommands.create(output, client, { + ...createOptions, + output: outputPath ?? null, + outputMode: definition.outputMode, + ...executionOptions, + }) + if (printUrls) { + printResultUrls(output, resultUrls) + } + return hasFailures ? 
1 : undefined +} + +abstract class GeneratedIntentCommandBase extends AuthenticatedCommand { + declare static intentDefinition: IntentFileCommandDefinition | IntentNoInputCommandDefinition + + outputPath = Option.String('--out,-o', { + description: this.getOutputDescription(), + }) + + printUrls = printUrlsOption() + + protected getIntentDefinition(): IntentFileCommandDefinition | IntentNoInputCommandDefinition { + const commandClass = this.constructor as unknown as typeof GeneratedIntentCommandBase + return commandClass.intentDefinition + } + + protected getIntentRawValues(): Record { + return readIntentRawValues(this, getIntentOptionDefinitions(this.getIntentDefinition())) + } + + private getOutputDescription(): string { + return this.getIntentDefinition().outputDescription + } + + protected validateOutputChoice(): number | undefined { + if (this.outputPath == null && !this.printUrls) { + this.output.error('Specify at least one of --out or --print-urls') + return 1 + } + + return undefined + } +} + +export abstract class GeneratedNoInputIntentCommand extends GeneratedIntentCommandBase { + protected override async run(): Promise { + const outputValidationError = this.validateOutputChoice() + if (outputValidationError != null) { + return outputValidationError + } + + return await executeIntentCommand({ + client: this.client, + createOptions: { + inputs: [], + }, + definition: this.getIntentDefinition() as IntentNoInputCommandDefinition, + output: this.output, + outputPath: this.outputPath, + printUrls: this.printUrls ?? 
false, + rawValues: this.getIntentRawValues(), + }) + } +} + +export function getIntentOptionDefinitions( + definition: IntentFileCommandDefinition | IntentNoInputCommandDefinition, +): readonly IntentOptionDefinition[] { + if (definition.execution.kind !== 'single-step' && definition.execution.kind !== 'dynamic-step') { + return [] + } + + return definition.execution.fields +} + +function readIntentRawValues( + command: object, + fieldDefinitions: readonly IntentOptionDefinition[], +): Record { + const rawValues: Record = {} + + for (const fieldDefinition of fieldDefinitions) { + rawValues[fieldDefinition.name] = (command as Record)[ + fieldDefinition.propertyName + ] + } + + return rawValues +} + +abstract class GeneratedFileIntentCommandBase extends GeneratedIntentCommandBase { + inputs = inputPathsOption('Provide an input path, directory, URL, or - for stdin') + + inputBase64 = inputBase64Option() + + recursive = recursiveOption() + + deleteAfterProcessing = deleteAfterProcessingOption() + + reprocessStale = reprocessStaleOption() + + protected override getIntentDefinition(): IntentFileCommandDefinition { + return super.getIntentDefinition() as IntentFileCommandDefinition + } + + protected async prepareInputs(): Promise { + return await prepareIntentInputs({ + inputValues: this.inputs ?? [], + inputBase64Values: this.inputBase64 ?? [], + }) + } + + protected getCreateOptions( + inputs: string[], + ): Omit { + return { + del: this.deleteAfterProcessing, + inputs, + reprocessStale: this.reprocessStale, + recursive: this.recursive, + } + } + + protected getProvidedInputCount(): number { + return countProvidedInputs({ + inputs: this.inputs, + inputBase64: this.inputBase64, + }) + } + + protected hasTransientInputSources(): boolean { + return ( + (this.inputs?.some((input) => isHttpUrl(input)) ?? false) || + (this.inputBase64?.length ?? 
0) > 0 + ) + } + + protected resolveOutputMode(): 'directory' | 'file' | undefined { + if (this.getIntentDefinition().outputMode != null) { + return this.getIntentDefinition().outputMode + } + + if (this.outputPath == null) { + return undefined + } + + try { + return statSync(this.outputPath).isDirectory() ? 'directory' : 'file' + } catch { + return 'file' + } + } + + protected isDirectoryOutputTarget(): boolean { + return this.resolveOutputMode() === 'directory' + } + + protected validateInputPresence(rawValues: Record): number | undefined { + const intentDefinition = this.getIntentDefinition() + const inputCount = this.getProvidedInputCount() + if (inputCount !== 0) { + return undefined + } + + if (!requiresLocalInput(intentDefinition.inputPolicy, rawValues)) { + return undefined + } + + if (intentDefinition.inputPolicy.kind === 'required') { + this.output.error(`${intentDefinition.commandLabel} requires --input or --input-base64`) + return 1 + } + + this.output.error( + `${intentDefinition.commandLabel} requires --input or --${intentDefinition.inputPolicy.field.replaceAll('_', '-')}`, + ) + return 1 + } + + protected validateBeforePreparingInputs(rawValues: Record): number | undefined { + const outputValidationError = this.validateOutputChoice() + if (outputValidationError != null) { + return outputValidationError + } + + const validationError = this.validateInputPresence(rawValues) + if (validationError != null) { + return validationError + } + + const execution = this.getIntentDefinition().execution + if (execution.kind === 'dynamic-step') { + createDynamicIntentStep(execution, rawValues) + } + + return undefined + } + + protected validatePreparedInputs(_preparedInputs: PreparedIntentInputs): number | undefined { + return undefined + } + + protected async executePreparedInputs( + rawValues: Record, + preparedInputs: PreparedIntentInputs, + ): Promise { + let effectivePreparedInputs = preparedInputs + const execution = this.getIntentDefinition().execution + if 
(execution.kind === 'dynamic-step') { + const descriptor = getSemanticIntentDescriptor(execution.handler) + if (descriptor.prepareInputs != null) { + effectivePreparedInputs = await descriptor.prepareInputs(preparedInputs, rawValues) + } + } + + return await executeIntentCommand({ + client: this.client, + createOptions: this.getCreateOptions(effectivePreparedInputs.inputs), + definition: this.getIntentDefinition(), + output: this.output, + outputPath: this.outputPath, + printUrls: this.printUrls ?? false, + rawValues, + }) + } + + protected override async run(): Promise { + const rawValues = this.getIntentRawValues() + const validationError = this.validateBeforePreparingInputs(rawValues) + if (validationError != null) { + return validationError + } + + const preparedInputs = await this.prepareInputs() + try { + const preparedInputError = this.validatePreparedInputs(preparedInputs) + if (preparedInputError != null) { + return preparedInputError + } + + return await this.executePreparedInputs(rawValues, preparedInputs) + } finally { + await Promise.all(preparedInputs.cleanup.map((cleanup) => cleanup())) + } + } +} + +export abstract class GeneratedWatchableFileIntentCommand extends GeneratedFileIntentCommandBase { + watch = watchOption() + + concurrency = concurrencyOption() + + protected override getCreateOptions( + inputs: string[], + ): Omit { + return { + ...super.getCreateOptions(inputs), + concurrency: this.concurrency, + watch: this.watch, + } + } + + protected override validateBeforePreparingInputs( + rawValues: Record, + ): number | undefined { + const validationError = super.validateBeforePreparingInputs(rawValues) + if (validationError != null) { + return validationError + } + + const sharedValidationError = validateSharedFileProcessingOptions({ + explicitInputCount: this.getProvidedInputCount(), + singleAssembly: this.getSingleAssemblyEnabled(), + watch: this.watch, + watchRequiresInputsMessage: `${this.getIntentDefinition().commandLabel} --watch requires 
--input or --input-base64`, + }) + if (sharedValidationError != null) { + this.output.error(sharedValidationError) + return 1 + } + + if (this.watch && this.hasTransientInputSources()) { + this.output.error('--watch is only supported for filesystem inputs') + return 1 + } + + return undefined + } + + protected getSingleAssemblyEnabled(): boolean { + return false + } + + protected override validatePreparedInputs( + preparedInputs: PreparedIntentInputs, + ): number | undefined { + if (this.watch && preparedInputs.hasTransientInputs) { + this.output.error('--watch is only supported for filesystem inputs') + return 1 + } + return undefined + } +} + +export abstract class GeneratedStandardFileIntentCommand extends GeneratedWatchableFileIntentCommand { + singleAssembly = singleAssemblyOption() + + protected override getSingleAssemblyEnabled(): boolean { + return this.singleAssembly + } + + protected override getCreateOptions( + inputs: string[], + ): Omit { + return { + ...super.getCreateOptions(inputs), + singleAssembly: this.singleAssembly, + } + } + + protected override validateBeforePreparingInputs( + rawValues: Record, + ): number | undefined { + const validationError = super.validateBeforePreparingInputs(rawValues) + if (validationError != null) { + return validationError + } + + if ( + this.singleAssembly && + (this.getProvidedInputCount() > 1 || + this.inputs.some((inputPath) => { + try { + return statSync(inputPath).isDirectory() + } catch { + return false + } + })) && + this.outputPath != null && + !this.isDirectoryOutputTarget() + ) { + this.output.error( + 'Output must be a directory when using --single-assembly with multiple inputs', + ) + return 1 + } + + return undefined + } +} + +export abstract class GeneratedBundledFileIntentCommand extends GeneratedFileIntentCommandBase { + protected override getCreateOptions( + inputs: string[], + ): Omit { + return { + ...super.getCreateOptions(inputs), + singleAssembly: true, + } + } +} diff --git 
a/packages/node/src/cli/resultFiles.ts b/packages/node/src/cli/resultFiles.ts new file mode 100644 index 00000000..ee2aa970 --- /dev/null +++ b/packages/node/src/cli/resultFiles.ts @@ -0,0 +1,105 @@ +export interface AssemblyResultEntryLike { + basename?: unknown + ext?: unknown + name?: unknown + ssl_url?: unknown + url?: unknown +} + +export interface NormalizedAssemblyResultFile { + file: AssemblyResultEntryLike + name: string + stepName: string + url: string +} + +export interface NormalizedAssemblyResults { + allFiles: NormalizedAssemblyResultFile[] + entries: Array<[string, Array]> +} + +function isAssemblyResultEntryLike(value: unknown): value is AssemblyResultEntryLike { + return value != null && typeof value === 'object' +} + +function normalizeAssemblyResultName( + stepName: string, + file: AssemblyResultEntryLike, +): string | null { + if (typeof file.name === 'string') { + return file.name + } + + if (typeof file.basename === 'string') { + if (typeof file.ext === 'string' && file.ext.length > 0) { + return `${file.basename}.${file.ext}` + } + + return file.basename + } + + return `${stepName}_result` +} + +function normalizeAssemblyResultUrl(file: AssemblyResultEntryLike): string | null { + if (typeof file.ssl_url === 'string') { + return file.ssl_url + } + + if (typeof file.url === 'string') { + return file.url + } + + return null +} + +function normalizeAssemblyResultFile( + stepName: string, + value: unknown, +): NormalizedAssemblyResultFile | null { + if (!isAssemblyResultEntryLike(value)) { + return null + } + + const url = normalizeAssemblyResultUrl(value) + const name = normalizeAssemblyResultName(stepName, value) + if (url == null || name == null) { + return null + } + + return { + file: value, + name, + stepName, + url, + } +} + +export function normalizeAssemblyResults(results: unknown): NormalizedAssemblyResults { + if (results == null || typeof results !== 'object' || Array.isArray(results)) { + return { + allFiles: [], + entries: [], + } + 
} + + const files: NormalizedAssemblyResultFile[] = [] + const entries = Object.entries(results) + for (const [stepName, stepResults] of entries) { + if (!Array.isArray(stepResults)) { + continue + } + + for (const stepResult of stepResults) { + const normalized = normalizeAssemblyResultFile(stepName, stepResult) + if (normalized != null) { + files.push(normalized) + } + } + } + + return { + allFiles: files, + entries, + } +} diff --git a/packages/node/src/cli/resultUrls.ts b/packages/node/src/cli/resultUrls.ts new file mode 100644 index 00000000..5b6a97ef --- /dev/null +++ b/packages/node/src/cli/resultUrls.ts @@ -0,0 +1,72 @@ +import type { IOutputCtl } from './OutputCtl.ts' +import type { NormalizedAssemblyResults } from './resultFiles.ts' +import { normalizeAssemblyResults } from './resultFiles.ts' + +export interface ResultUrlRow { + assemblyId: string + name: string + step: string + url: string +} + +export function collectResultUrlRows({ + assemblyId, + results, +}: { + assemblyId: string + results: unknown +}): ResultUrlRow[] { + return collectNormalizedResultUrlRows({ + assemblyId, + normalizedResults: normalizeAssemblyResults(results), + }) +} + +export function collectNormalizedResultUrlRows({ + assemblyId, + normalizedResults, +}: { + assemblyId: string + normalizedResults: NormalizedAssemblyResults +}): ResultUrlRow[] { + return normalizedResults.allFiles.map((file) => ({ + assemblyId, + step: file.stepName, + name: file.name, + url: file.url, + })) +} + +export function formatResultUrlRows(rows: readonly ResultUrlRow[]): string { + if (rows.length === 0) { + return '' + } + + const includeAssembly = new Set(rows.map((row) => row.assemblyId)).size > 1 + const headers = includeAssembly ? ['ASSEMBLY', 'STEP', 'NAME', 'URL'] : ['STEP', 'NAME', 'URL'] + const tableRows = rows.map((row) => + includeAssembly ? 
[row.assemblyId, row.step, row.name, row.url] : [row.step, row.name, row.url], + ) + + const widths = headers.map((header, index) => + Math.max(header.length, ...tableRows.map((row) => row[index]?.length ?? 0)), + ) + + return [headers, ...tableRows] + .map((row) => + row + .map((value, index) => + index === row.length - 1 ? value : value.padEnd(widths[index] ?? value.length), + ) + .join(' '), + ) + .join('\n') +} + +export function printResultUrls(output: IOutputCtl, rows: readonly ResultUrlRow[]): void { + if (rows.length === 0) { + return + } + + output.print(formatResultUrlRows(rows), { urls: rows }) +} diff --git a/packages/node/src/cli/semanticIntents/imageDescribe.ts b/packages/node/src/cli/semanticIntents/imageDescribe.ts new file mode 100644 index 00000000..a1b2beab --- /dev/null +++ b/packages/node/src/cli/semanticIntents/imageDescribe.ts @@ -0,0 +1,254 @@ +import { parseStringArrayValue } from '../intentFields.ts' +import type { + IntentDynamicStepExecutionDefinition, + IntentOptionDefinition, +} from '../intentRuntime.ts' +import type { SemanticIntentDescriptor, SemanticIntentPresentation } from './index.ts' +import { parseOptionalEnumValue, parseUniqueEnumArray } from './parsing.ts' + +const imageDescribeFields = ['labels', 'altText', 'title', 'caption', 'description'] as const + +type ImageDescribeField = (typeof imageDescribeFields)[number] + +const wordpressDescribeFields = [ + 'altText', + 'title', + 'caption', + 'description', +] as const satisfies readonly ImageDescribeField[] + +const defaultDescribeModel = 'anthropic/claude-4-sonnet-20250514' +const describeFieldDescriptions = { + altText: 'A concise accessibility-focused alt text that objectively describes the image', + title: 'A concise publishable title for the image', + caption: 'A short caption suitable for displaying below the image', + description: 'A richer description of the image suitable for CMS usage', +} as const satisfies Record, string> + +const imageDescribeExecutionDefinition 
= { + kind: 'dynamic-step', + handler: 'image-describe', + resultStepName: 'describe', + fields: [ + { + name: 'fields', + kind: 'string-array', + propertyName: 'fields', + optionFlags: '--fields', + description: + 'Describe output fields to generate, for example labels or altText,title,caption,description', + required: false, + }, + { + name: 'forProfile', + kind: 'string', + propertyName: 'forProfile', + optionFlags: '--for', + description: 'Use a named output profile, currently: wordpress', + required: false, + }, + { + name: 'model', + kind: 'string', + propertyName: 'model', + optionFlags: '--model', + description: + 'Model to use for generated text fields (default: anthropic/claude-4-sonnet-20250514)', + required: false, + }, + ] as const satisfies readonly IntentOptionDefinition[], +} satisfies IntentDynamicStepExecutionDefinition + +const imageDescribeCommandPresentation = { + description: 'Describe images as labels or publishable text fields', + details: + 'Generates image labels through `/image/describe`, or structured altText/title/caption/description through `/ai/chat`, then writes the JSON result to `--out`.', + examples: [ + [ + 'Describe an image as labels', + 'transloadit image describe --input hero.jpg --out labels.json', + ], + [ + 'Generate WordPress-ready fields', + 'transloadit image describe --input hero.jpg --for wordpress --out fields.json', + ], + [ + 'Request a custom field set', + 'transloadit image describe --input hero.jpg --fields altText,title,caption --out fields.json', + ], + ] as Array<[string, string]>, +} as const satisfies SemanticIntentPresentation + +function parseDescribeFields(value: string[] | undefined): ImageDescribeField[] { + const rawFields = parseStringArrayValue(value ?? 
[]) + return parseUniqueEnumArray({ + flagName: '--fields', + supportedValues: imageDescribeFields, + values: rawFields, + }) +} + +function resolveDescribeProfile(profile: string | undefined): 'wordpress' | null { + return parseOptionalEnumValue({ + flagName: '--for', + supportedValues: ['wordpress'] as const, + value: profile, + }) +} + +function resolveRequestedDescribeFields({ + explicitFields, + profile, +}: { + explicitFields: ImageDescribeField[] + profile: 'wordpress' | null +}): ImageDescribeField[] { + if (explicitFields.length > 0) { + return explicitFields + } + + if (profile === 'wordpress') { + return [...wordpressDescribeFields] + } + + return ['labels'] +} + +function validateDescribeFields({ + fields, + model, + profile, +}: { + fields: ImageDescribeField[] + model: string + profile: 'wordpress' | null +}): void { + const includesLabels = fields.includes('labels') + + if (includesLabels && fields.length > 1) { + throw new Error( + 'The labels field cannot be combined with altText, title, caption, or description', + ) + } + + if (includesLabels && profile != null) { + throw new Error('--for cannot be combined with --fields labels') + } + + if (includesLabels && model !== defaultDescribeModel) { + throw new Error( + '--model is only supported when generating altText, title, caption, or description', + ) + } +} + +function resolveImageDescribeRequest(rawValues: Record): { + fields: ImageDescribeField[] + profile: 'wordpress' | null +} { + const explicitFields = parseDescribeFields(rawValues.fields as string[] | undefined) + const profile = resolveDescribeProfile(rawValues.forProfile as string | undefined) + const fields = resolveRequestedDescribeFields({ explicitFields, profile }) + validateDescribeFields({ + fields, + model: String(rawValues.model ?? 
defaultDescribeModel), + profile, + }) + + return { fields, profile } +} + +function buildDescribeAiChatSchema(fields: readonly ImageDescribeField[]): Record { + const properties = Object.fromEntries( + fields.map((field) => { + return [ + field, + { + type: 'string', + description: describeFieldDescriptions[field as Exclude], + }, + ] + }), + ) + + return { + type: 'object', + additionalProperties: false, + required: [...fields], + properties, + } +} + +function buildDescribeAiChatMessages({ + fields, + profile, +}: { + fields: readonly ImageDescribeField[] + profile: 'wordpress' | null +}): { + messages: string + systemMessage: string +} { + const requestedFields = fields.join(', ') + const profileHint = + profile === 'wordpress' + ? 'The output is for the WordPress media library.' + : 'The output is for a publishing workflow.' + + return { + systemMessage: [ + 'You generate accurate image copy for publishing workflows.', + profileHint, + 'Return only the schema fields requested.', + 'Be concrete, concise, and faithful to what is visibly present in the image.', + 'Do not invent facts, brands, locations, or identities that are not clearly visible.', + 'Avoid keyword stuffing, hype, and mentions of SEO or accessibility in the output itself.', + 'For altText, write one objective sentence focused on what matters to someone who cannot see the image.', + 'For title, keep it short and natural.', + 'For caption, write one short sentence suitable for publication.', + 'For description, write one or two sentences with slightly more context than the caption.', + ].join(' '), + messages: `Analyze the attached image and fill these fields: ${requestedFields}.`, + } +} + +function createImageDescribeStep(rawValues: Record): Record { + const { fields, profile } = resolveImageDescribeRequest(rawValues) + if (fields.length === 1 && fields[0] === 'labels') { + return { + robot: '/image/describe', + use: ':original', + result: true, + provider: 'aws', + format: 'json', + granularity: 
'list', + explicit_descriptions: false, + } + } + + const { messages, systemMessage } = buildDescribeAiChatMessages({ fields, profile }) + + return { + robot: '/ai/chat', + use: ':original', + result: true, + model: String(rawValues.model ?? defaultDescribeModel), + format: 'json', + return_messages: 'last', + test_credentials: true, + schema: JSON.stringify(buildDescribeAiChatSchema(fields)), + messages, + system_message: systemMessage, + // @TODO Move these inline /ai/chat instructions into a builtin template in api2 and + // switch this command to call that builtin instead of shipping prompt logic in the CLI. + } +} + +export const imageDescribeSemanticIntentDescriptor = { + createStep: createImageDescribeStep, + execution: imageDescribeExecutionDefinition, + inputPolicy: { kind: 'required' }, + outputDescription: 'Write the JSON result to this path or directory', + presentation: imageDescribeCommandPresentation, + runnerKind: 'watchable', +} as const satisfies SemanticIntentDescriptor diff --git a/packages/node/src/cli/semanticIntents/index.ts b/packages/node/src/cli/semanticIntents/index.ts new file mode 100644 index 00000000..c7abab06 --- /dev/null +++ b/packages/node/src/cli/semanticIntents/index.ts @@ -0,0 +1,48 @@ +import type { IntentInputPolicy } from '../intentInputPolicy.ts' +import type { + IntentDynamicStepExecutionDefinition, + IntentRunnerKind, + PreparedIntentInputs, +} from '../intentRuntime.ts' +import { imageDescribeSemanticIntentDescriptor } from './imageDescribe.ts' +import { + markdownDocxSemanticIntentDescriptor, + markdownPdfSemanticIntentDescriptor, +} from './markdownPdf.ts' + +export interface SemanticIntentPresentation { + description: string + details: string + examples: Array<[string, string]> +} + +export interface SemanticIntentDescriptor { + createStep: (rawValues: Record) => Record + execution: IntentDynamicStepExecutionDefinition + inputPolicy: IntentInputPolicy + outputDescription: string + prepareInputs?: ( + preparedInputs: 
PreparedIntentInputs, + rawValues: Record, + ) => Promise + presentation: SemanticIntentPresentation + runnerKind: IntentRunnerKind +} + +const semanticIntentDescriptors: Record = { + 'image-describe': imageDescribeSemanticIntentDescriptor, + 'markdown-pdf': { + ...markdownPdfSemanticIntentDescriptor, + }, + 'markdown-docx': { + ...markdownDocxSemanticIntentDescriptor, + }, +} + +export function getSemanticIntentDescriptor(name: string): SemanticIntentDescriptor { + if (!(name in semanticIntentDescriptors)) { + throw new Error(`Semantic intent descriptor does not exist for "${name}"`) + } + + return semanticIntentDescriptors[name] +} diff --git a/packages/node/src/cli/semanticIntents/markdownPdf.ts b/packages/node/src/cli/semanticIntents/markdownPdf.ts new file mode 100644 index 00000000..70691b24 --- /dev/null +++ b/packages/node/src/cli/semanticIntents/markdownPdf.ts @@ -0,0 +1,120 @@ +import type { IntentOptionDefinition } from '../intentRuntime.ts' +import type { SemanticIntentDescriptor, SemanticIntentPresentation } from './index.ts' +import { parseOptionalEnumValue } from './parsing.ts' + +const defaultMarkdownFormat = 'gfm' +const defaultMarkdownTheme = 'github' +const markdownFormats = ['commonmark', 'gfm'] as const +const markdownThemes = ['bare', 'github'] as const + +function resolveMarkdownFormat(value: unknown): 'commonmark' | 'gfm' { + return ( + parseOptionalEnumValue({ + flagName: '--markdown-format', + supportedValues: markdownFormats, + value, + }) ?? defaultMarkdownFormat + ) +} + +function resolveMarkdownTheme(value: unknown): 'bare' | 'github' { + return ( + parseOptionalEnumValue({ + flagName: '--markdown-theme', + supportedValues: markdownThemes, + value, + }) ?? 
defaultMarkdownTheme + ) +} + +const markdownOptionDefinitions = [ + { + name: 'markdownFormat', + kind: 'string', + propertyName: 'markdownFormat', + optionFlags: '--markdown-format', + description: 'Markdown variant to parse, either commonmark or gfm', + required: false, + }, + { + name: 'markdownTheme', + kind: 'string', + propertyName: 'markdownTheme', + optionFlags: '--markdown-theme', + description: 'Markdown theme to render, either github or bare', + required: false, + }, +] as const satisfies readonly IntentOptionDefinition[] + +function createMarkdownConvertSemanticIntent({ + description, + details, + exampleOutput, + format, + handler, +}: { + description: string + details: string + exampleOutput: string + format: 'docx' | 'pdf' + handler: 'markdown-docx' | 'markdown-pdf' +}): SemanticIntentDescriptor { + const formatLabel = format.toUpperCase() + const presentation = { + description, + details, + examples: [ + [ + `Render a Markdown file as a ${formatLabel} file`, + `transloadit markdown ${format} --input README.md --out ${exampleOutput}`, + ], + [ + 'Print a temporary result URL without downloading locally', + `transloadit markdown ${format} --input README.md --print-urls`, + ], + ], + } satisfies SemanticIntentPresentation + + return { + createStep(rawValues) { + return { + robot: '/document/convert', + use: ':original', + result: true, + format, + markdown_format: resolveMarkdownFormat(rawValues.markdownFormat), + markdown_theme: resolveMarkdownTheme(rawValues.markdownTheme), + // @TODO Replace this semantic CLI alias with a builtin/api2-owned command surface if we later + // want richer Markdown conversion semantics beyond `/document/convert`. 
+ } + }, + execution: { + kind: 'dynamic-step', + handler, + resultStepName: 'convert', + fields: markdownOptionDefinitions, + }, + inputPolicy: { kind: 'required' }, + outputDescription: `Write the rendered ${formatLabel} to this path or directory`, + presentation, + runnerKind: 'watchable', + } +} + +export const markdownPdfSemanticIntentDescriptor = createMarkdownConvertSemanticIntent({ + description: 'Render Markdown files as PDFs', + details: + 'Runs `/document/convert` with `format: pdf`, letting the backend render Markdown and preserve features such as internal heading links in the generated PDF.', + exampleOutput: 'README.pdf', + format: 'pdf', + handler: 'markdown-pdf', +}) + +export const markdownDocxSemanticIntentDescriptor = createMarkdownConvertSemanticIntent({ + description: 'Render Markdown files as DOCX documents', + details: + 'Runs `/document/convert` with `format: docx`, letting the backend render Markdown and convert it into a Word document.', + exampleOutput: 'README.docx', + format: 'docx', + handler: 'markdown-docx', +}) diff --git a/packages/node/src/cli/semanticIntents/parsing.ts b/packages/node/src/cli/semanticIntents/parsing.ts new file mode 100644 index 00000000..6b78cdec --- /dev/null +++ b/packages/node/src/cli/semanticIntents/parsing.ts @@ -0,0 +1,56 @@ +export function parseOptionalEnumValue({ + flagName, + supportedValues, + value, +}: { + flagName: string + supportedValues: readonly TValue[] + value: unknown +}): TValue | null { + if (value == null || value === '') { + return null + } + + if (typeof value === 'string' && supportedValues.includes(value as TValue)) { + return value as TValue + } + + throw new Error( + `Unsupported ${flagName} value "${String(value)}". 
Supported values: ${supportedValues.join(', ')}`, + ) +} + +export function parseUniqueEnumArray({ + flagName, + supportedValues, + values, +}: { + flagName: string + supportedValues: readonly TValue[] + values: readonly string[] +}): TValue[] { + if (values.length === 0) { + return [] + } + + const parsedValues: TValue[] = [] + const seen = new Set() + + for (const value of values) { + if (!supportedValues.includes(value as TValue)) { + throw new Error( + `Unsupported ${flagName} value "${value}". Supported values: ${supportedValues.join(', ')}`, + ) + } + + const parsedValue = value as TValue + if (seen.has(parsedValue)) { + continue + } + + seen.add(parsedValue) + parsedValues.push(parsedValue) + } + + return parsedValues +} diff --git a/packages/node/src/cli/stepsInput.ts b/packages/node/src/cli/stepsInput.ts new file mode 100644 index 00000000..c3daf224 --- /dev/null +++ b/packages/node/src/cli/stepsInput.ts @@ -0,0 +1,32 @@ +import fsp from 'node:fs/promises' + +import type { StepsInput } from '../alphalib/types/template.ts' +import { stepsSchema } from '../alphalib/types/template.ts' + +export function parseStepsInputJson(content: string): StepsInput { + const parsed: unknown = JSON.parse(content) + const validated = stepsSchema.safeParse(parsed) + if (!validated.success) { + throw new Error(`Invalid steps format: ${validated.error.message}`) + } + + const parsedSteps = parsed as Record> + const validatedSteps = validated.data as Record> + + return Object.fromEntries( + Object.entries(parsedSteps).map(([stepName, stepInput]) => { + const normalizedStep = validatedSteps[stepName] ?? {} + return [ + stepName, + Object.fromEntries( + Object.keys(stepInput).map((key) => [key, normalizedStep[key] ?? 
stepInput[key]]), + ), + ] + }), + ) as StepsInput +} + +export async function readStepsInputFile(filePath: string): Promise { + const content = await fsp.readFile(filePath, 'utf8') + return parseStepsInputJson(content) +} diff --git a/packages/node/src/ensureUniqueCounter.ts b/packages/node/src/ensureUniqueCounter.ts new file mode 100644 index 00000000..baf1ffbf --- /dev/null +++ b/packages/node/src/ensureUniqueCounter.ts @@ -0,0 +1,75 @@ +const uniqueCounterScopes = new WeakMap>() + +async function runEnsureUniqueCounterValue({ + initialValue, + isTaken, + reserve, + nextValue, +}: { + initialValue: T + isTaken: (candidate: T) => Promise | boolean + reserve: (candidate: T) => void + nextValue: (counter: number) => T +}): Promise { + let candidate = initialValue + let counter = 1 + + while (await isTaken(candidate)) { + candidate = nextValue(counter) + counter += 1 + } + + reserve(candidate) + return candidate +} + +export async function ensureUniqueCounterValue({ + initialValue, + isTaken, + reserve, + nextValue, + scope, +}: { + initialValue: T + isTaken: (candidate: T) => Promise | boolean + reserve: (candidate: T) => void + nextValue: (counter: number) => T + scope?: object +}): Promise { + if (scope == null) { + return await runEnsureUniqueCounterValue({ + initialValue, + isTaken, + reserve, + nextValue, + }) + } + + const previous = uniqueCounterScopes.get(scope) ?? 
Promise.resolve() + let releaseScope: (() => void) | undefined + const pendingScope = new Promise((resolve) => { + releaseScope = resolve + }) + const currentScope = previous + .catch(() => undefined) + .then(async () => { + await pendingScope + }) + uniqueCounterScopes.set(scope, currentScope) + + await previous.catch(() => undefined) + + try { + return await runEnsureUniqueCounterValue({ + initialValue, + isTaken, + reserve, + nextValue, + }) + } finally { + releaseScope?.() + if (uniqueCounterScopes.get(scope) === currentScope) { + uniqueCounterScopes.delete(scope) + } + } +} diff --git a/packages/node/src/inputFiles.ts b/packages/node/src/inputFiles.ts index 00f7acdf..febc343a 100644 --- a/packages/node/src/inputFiles.ts +++ b/packages/node/src/inputFiles.ts @@ -1,13 +1,17 @@ +import * as dnsPromises from 'node:dns/promises' import { createWriteStream } from 'node:fs' import { mkdtemp, rm, writeFile } from 'node:fs/promises' import { isIP } from 'node:net' import { tmpdir } from 'node:os' -import { basename, join } from 'node:path' +import { basename, join, parse } from 'node:path' import type { Readable } from 'node:stream' import { pipeline } from 'node:stream/promises' +import type CacheableLookup from 'cacheable-lookup' +import type { EntryObject, IPFamily } from 'cacheable-lookup' import got from 'got' import type { Input as IntoStreamInput } from 'into-stream' import type { CreateAssemblyParams } from './apiTypes.ts' +import { ensureUniqueCounterValue } from './ensureUniqueCounter.ts' export type InputFile = | { @@ -63,15 +67,28 @@ const ensureUnique = (field: string, used: Set): void => { used.add(field) } -const ensureUniqueStepName = (baseName: string, used: Set): string => { - let name = baseName - let counter = 1 - while (used.has(name)) { - name = `${baseName}_${counter}` - counter += 1 - } - used.add(name) - return name +const ensureUniqueStepName = async (baseName: string, used: Set): Promise => + await ensureUniqueCounterValue({ + initialValue: 
baseName, + isTaken: (candidate) => used.has(candidate), + reserve: (candidate) => used.add(candidate), + nextValue: (counter) => `${baseName}_${counter}`, + scope: used, + }) + +const ensureUniqueTempFilePath = async ( + root: string, + filename: string, + used: Set, +): Promise => { + const parsedFilename = parse(basename(filename)) + return await ensureUniqueCounterValue({ + initialValue: join(root, parsedFilename.base), + isTaken: (candidate) => used.has(candidate), + reserve: (candidate) => used.add(candidate), + nextValue: (counter) => join(root, `${parsedFilename.name}-${counter}${parsedFilename.ext}`), + scope: used, + }) } const decodeBase64 = (value: string): Buffer => Buffer.from(value, 'base64') @@ -106,27 +123,72 @@ const findImportStepName = (field: string, steps: Record): stri return null } -const downloadUrlToFile = async (url: string, filePath: string): Promise => { - await pipeline(got.stream(url), createWriteStream(filePath)) +const MAX_URL_REDIRECTS = 10 + +const isRedirectStatusCode = (statusCode: number): boolean => + statusCode === 301 || + statusCode === 302 || + statusCode === 303 || + statusCode === 307 || + statusCode === 308 + +const ipv4FromMappedIpv6 = (address: string): string | null => { + const lowerAddress = address.toLowerCase() + const mappedPrefix = lowerAddress.startsWith('::ffff:') + ? '::ffff:' + : lowerAddress.startsWith('0:0:0:0:0:ffff:') + ? 
'0:0:0:0:0:ffff:' + : null + + if (mappedPrefix == null) { + return null + } + + const mappedValue = lowerAddress.slice(mappedPrefix.length) + if (mappedValue.includes('.')) { + return mappedValue + } + + const segments = mappedValue.split(':') + if (segments.length !== 2) { + return null + } + + const values = segments.map((segment) => Number.parseInt(segment, 16)) + if (values.some((value) => Number.isNaN(value) || value < 0 || value > 0xffff)) { + return null + } + + return values.flatMap((value) => [(value >> 8) & 0xff, value & 0xff]).join('.') } const isPrivateIp = (address: string): boolean => { - if (address === 'localhost') return true - const family = isIP(address) + const normalizedAddress = + address.startsWith('[') && address.endsWith(']') ? address.slice(1, -1) : address + if (normalizedAddress === 'localhost') return true + const family = isIP(normalizedAddress) if (family === 4) { - const parts = address.split('.').map((chunk) => Number(chunk)) + const parts = normalizedAddress.split('.').map((chunk) => Number(chunk)) const [a, b] = parts if (a === 10) return true if (a === 127) return true if (a === 0) return true + if (a === 100 && b >= 64 && b <= 127) return true if (a === 169 && b === 254) return true if (a === 172 && b >= 16 && b <= 31) return true + if (a === 192 && b === 0 && parts[2] === 0) return true if (a === 192 && b === 168) return true + if (a === 198 && (b === 18 || b === 19)) return true return false } if (family === 6) { - const normalized = address.toLowerCase() - if (normalized === '::1') return true + const normalized = normalizedAddress.toLowerCase().split('%')[0] + if (normalized === '::1' || normalized === '0:0:0:0:0:0:0:1') return true + if (normalized === '::' || normalized === '0:0:0:0:0:0:0:0') return true + const mappedAddress = ipv4FromMappedIpv6(normalized) + if (mappedAddress != null && isPrivateIp(mappedAddress)) { + return true + } if (normalized.startsWith('fe80:')) return true if (normalized.startsWith('fc') || 
normalized.startsWith('fd')) return true return false @@ -134,14 +196,201 @@ const isPrivateIp = (address: string): boolean => { return false } -const assertPublicDownloadUrl = (value: string): void => { +export const resolvePublicDownloadAddresses = async ( + value: string, +): Promise> => { const parsed = new URL(value) + const hostname = + parsed.hostname.startsWith('[') && parsed.hostname.endsWith(']') + ? parsed.hostname.slice(1, -1) + : parsed.hostname if (!['http:', 'https:'].includes(parsed.protocol)) { throw new Error(`URL downloads are limited to http/https: ${value}`) } - if (isPrivateIp(parsed.hostname)) { + if (isPrivateIp(hostname)) { throw new Error(`URL downloads are limited to public hosts: ${value}`) } + + const literalFamily = isIP(hostname) + const resolvedAddresses = + literalFamily !== 0 + ? [{ address: hostname, family: literalFamily as 4 | 6 }] + : await dnsPromises.lookup(hostname, { + all: true, + verbatim: true, + }) + if (resolvedAddresses.some((address) => isPrivateIp(address.address))) { + throw new Error(`URL downloads are limited to public hosts: ${value}`) + } + + if (resolvedAddresses.length === 0) { + throw new Error(`Unable to resolve URL hostname: ${value}`) + } + + return resolvedAddresses.map((address) => ({ + address: address.address, + family: address.family as 4 | 6, + })) +} + +export function createPinnedDnsLookup( + validatedAddresses: Array<{ address: string; family: 4 | 6 }>, +): CacheableLookup['lookup'] { + const pinnedAddresses = [...validatedAddresses] + + function pickAddress(family?: IPFamily): { address: string; family: 4 | 6 } | null { + if (family == null) { + return pinnedAddresses[0] ?? null + } + + return pinnedAddresses.find((address) => address.family === family) ?? 
null + } + + function pinnedDnsLookup( + _hostname: string, + family: IPFamily, + callback: (error: NodeJS.ErrnoException | null, address: string, family: IPFamily) => void, + ): void + function pinnedDnsLookup( + _hostname: string, + callback: (error: NodeJS.ErrnoException | null, address: string, family: IPFamily) => void, + ): void + function pinnedDnsLookup( + _hostname: string, + options: { all: true }, + callback: (error: NodeJS.ErrnoException | null, result: ReadonlyArray) => void, + ): void + function pinnedDnsLookup( + _hostname: string, + options: object, + callback: (error: NodeJS.ErrnoException | null, address: string, family: IPFamily) => void, + ): void + function pinnedDnsLookup( + _hostname: string, + familyOrCallback: + | IPFamily + | object + | ((error: NodeJS.ErrnoException | null, address: string, family: IPFamily) => void), + callback?: + | ((error: NodeJS.ErrnoException | null, address: string, family: IPFamily) => void) + | ((error: NodeJS.ErrnoException | null, result: ReadonlyArray) => void), + ): void { + if (typeof familyOrCallback === 'function') { + const address = pickAddress() + if (address == null) { + familyOrCallback( + new Error('No validated addresses available') as NodeJS.ErrnoException, + '', + 4, + ) + return + } + familyOrCallback(null, address.address, address.family) + return + } + + if ( + typeof familyOrCallback === 'object' && + familyOrCallback != null && + 'all' in familyOrCallback + ) { + ;( + callback as ( + error: NodeJS.ErrnoException | null, + result: ReadonlyArray, + ) => void + )( + null, + pinnedAddresses.map((address) => ({ + address: address.address, + family: address.family, + expires: 0, + })), + ) + return + } + + const family = typeof familyOrCallback === 'number' ? 
familyOrCallback : undefined + const address = pickAddress(family) + if (address == null) { + ;( + callback as (error: NodeJS.ErrnoException | null, address: string, family: IPFamily) => void + )(new Error('No validated addresses available') as NodeJS.ErrnoException, '', family ?? 4) + return + } + + ;(callback as (error: NodeJS.ErrnoException | null, address: string, family: IPFamily) => void)( + null, + address.address, + address.family, + ) + } + + return pinnedDnsLookup +} + +const downloadUrlToFile = async ({ + allowPrivateUrls, + filePath, + url, +}: { + allowPrivateUrls: boolean + filePath: string + url: string +}): Promise => { + let currentUrl = url + + for (let redirectCount = 0; redirectCount <= MAX_URL_REDIRECTS; redirectCount += 1) { + let validatedAddresses: Array<{ address: string; family: 4 | 6 }> | null = null + if (!allowPrivateUrls) { + validatedAddresses = await resolvePublicDownloadAddresses(currentUrl) + } + + const dnsLookup: CacheableLookup['lookup'] | undefined = + validatedAddresses == null ? undefined : createPinnedDnsLookup(validatedAddresses) + + const responseStream = got.stream(currentUrl, { + dnsLookup, + followRedirect: false, + retry: { limit: 0 }, + throwHttpErrors: false, + }) + + const response = await new Promise< + Readable & { headers: Record; statusCode?: number } + >((resolvePromise, reject) => { + responseStream.once('response', (incomingResponse) => { + resolvePromise( + incomingResponse as Readable & { + headers: Record + statusCode?: number + }, + ) + }) + responseStream.once('error', reject) + }) + + const statusCode = response.statusCode ?? 
0 + if (isRedirectStatusCode(statusCode)) { + responseStream.destroy() + const location = response.headers.location + if (location == null) { + throw new Error(`Redirect response missing Location header: ${currentUrl}`) + } + currentUrl = new URL(location, currentUrl).toString() + continue + } + + if (statusCode >= 400) { + responseStream.destroy() + throw new Error(`Failed to download URL: ${currentUrl} (${statusCode})`) + } + + await pipeline(responseStream, createWriteStream(filePath)) + return + } + + throw new Error(`Too many redirects while downloading URL input: ${url}`) } export const prepareInputFiles = async ( @@ -176,6 +425,7 @@ export const prepareInputFiles = async ( const steps = isRecord(nextParams.steps) ? { ...nextParams.steps } : {} const usedSteps = new Set(Object.keys(steps)) const usedFields = new Set() + const usedTempPaths = new Set() const importUrlsByStep = new Map() const importStepNames = Object.keys(steps).filter((name) => isHttpImportStep(steps[name])) const sharedImportStep = importStepNames.length === 1 ? importStepNames[0] : null @@ -211,7 +461,7 @@ export const prepareInputFiles = async ( if (base64Strategy === 'tempfile') { const root = await ensureTempRoot() const filename = file.filename ? basename(file.filename) : `${file.field}.bin` - const filePath = join(root, filename) + const filePath = await ensureUniqueTempFilePath(root, filename, usedTempPaths) await writeFile(filePath, buffer) files[file.field] = filePath } else { @@ -226,7 +476,7 @@ export const prepareInputFiles = async ( urlStrategy === 'import' || (urlStrategy === 'import-if-present' && targetStep) if (shouldImport) { - const stepName = targetStep ?? ensureUniqueStepName(file.field, usedSteps) + const stepName = targetStep ?? (await ensureUniqueStepName(file.field, usedSteps)) const urls = importUrlsByStep.get(stepName) ?? [] urls.push(file.url) importUrlsByStep.set(stepName, urls) @@ -238,11 +488,12 @@ export const prepareInputFiles = async ( (file.filename ? 
basename(file.filename) : null) ?? getFilenameFromUrl(file.url) ?? `${file.field}.bin` - const filePath = join(root, filename) - if (!allowPrivateUrls) { - assertPublicDownloadUrl(file.url) - } - await downloadUrlToFile(file.url, filePath) + const filePath = await ensureUniqueTempFilePath(root, filename, usedTempPaths) + await downloadUrlToFile({ + allowPrivateUrls, + filePath, + url: file.url, + }) files[file.field] = filePath } } diff --git a/packages/node/test/e2e/cli/assemblies-create.test.ts b/packages/node/test/e2e/cli/assemblies-create.test.ts index 13e4b1dd..a7dbd661 100644 --- a/packages/node/test/e2e/cli/assemblies-create.test.ts +++ b/packages/node/test/e2e/cli/assemblies-create.test.ts @@ -471,7 +471,7 @@ describeLive('assemblies', () => { ) it( - 'should close streams immediately in single-assembly mode', + 'should avoid opening filesystem streams during single-assembly collection', testCase(async (client) => { // Create multiple input files for single-assembly mode const fileCount = 5 @@ -493,18 +493,15 @@ describeLive('assemblies', () => { const outs = await fsp.readdir('out') expect(outs.length).to.be.greaterThan(0) - // Analyze debug output to verify streams were handled properly. - // The fixed code emits "STREAM CLOSED" when closing streams during collection. - // The unfixed code keeps all streams open until upload, risking fd exhaustion. + // Analyze debug output to verify filesystem inputs were collected as file paths. + // The current implementation only opens upload streams lazily for stdin inputs, + // so there should be no eager "STREAM CLOSED" churn during collection. 
const debugOutput = output.get(true) as OutputEntry[] const messages = debugOutput.map((e) => String(e.msg)) - // Check that streams were closed during collection (added by the fix) const streamClosedMessages = messages.filter((m) => m.startsWith('STREAM CLOSED')) - expect( - streamClosedMessages.length, - 'Expected "STREAM CLOSED" messages indicating proper fd management', - ).to.be.greaterThan(0) + expect(streamClosedMessages).to.have.lengthOf(0) + expect(messages).to.include(`Creating single assembly with ${fileCount} files`) }), ) }) diff --git a/packages/node/test/support/intentSmokeCases.ts b/packages/node/test/support/intentSmokeCases.ts new file mode 100644 index 00000000..2ba4dc0f --- /dev/null +++ b/packages/node/test/support/intentSmokeCases.ts @@ -0,0 +1,125 @@ +import { + getIntentCatalogKey, + getIntentPaths, + intentCatalog, +} from '../../src/cli/intentCommandSpecs.ts' + +export interface IntentSmokeCase { + args: string[] + key: string + outputPath: string + paths: string[] + verifier: string +} + +const intentSmokeOverrides: Record> = { + '/audio/waveform': { + args: ['--input', '@fixture/input.mp3'], + outputPath: 'audio-waveform.png', + verifier: 'png', + }, + '/document/autorotate': { + args: ['--input', '@fixture/input.pdf'], + outputPath: 'document-auto-rotate.pdf', + verifier: 'pdf', + }, + '/document/convert': { + args: ['--input', '@fixture/input.txt', '--format', 'pdf'], + outputPath: 'document-convert.pdf', + verifier: 'pdf', + }, + '/document/optimize': { + args: ['--input', '@fixture/input.pdf'], + outputPath: 'document-optimize.pdf', + verifier: 'pdf', + }, + '/document/thumbs': { + args: ['--input', '@fixture/input.pdf'], + outputPath: 'document-thumbs', + verifier: 'document-thumbs', + }, + '/file/compress': { + args: ['--input', '@fixture/input.txt', '--format', 'zip'], + outputPath: 'file-compress.zip', + verifier: 'zip', + }, + '/file/decompress': { + args: ['--input', '@fixture/input.zip'], + outputPath: 'file-decompress', + 
verifier: 'file-decompress', + }, + '/file/preview': { + args: ['--input', '@preview-url', '--width', '300'], + outputPath: 'preview-generate.png', + verifier: 'png', + }, + '/image/bgremove': { + args: ['--input', '@fixture/input.jpg'], + outputPath: 'image-remove-background.png', + verifier: 'png', + }, + '/image/generate': { + args: [ + '--prompt', + 'A small red bicycle on a cream background, studio lighting', + '--model', + 'google/nano-banana', + ], + outputPath: 'image-generate.png', + verifier: 'png', + }, + 'image-describe:image/describe': { + args: ['--input', '@fixture/input.jpg'], + outputPath: 'image-describe.json', + verifier: 'json', + }, + 'markdown-pdf:markdown/pdf': { + args: ['--input', '@fixture/input.md'], + outputPath: 'markdown-pdf.pdf', + verifier: 'pdf', + }, + 'markdown-docx:markdown/docx': { + args: ['--input', '@fixture/input.md'], + outputPath: 'markdown-docx.docx', + verifier: 'docx', + }, + '/image/optimize': { + args: ['--input', '@fixture/input.jpg'], + outputPath: 'image-optimize.jpg', + verifier: 'jpeg', + }, + '/image/resize': { + args: ['--input', '@fixture/input.jpg', '--width', '200'], + outputPath: 'image-resize.jpg', + verifier: 'jpeg', + }, + '/text/speak': { + args: ['--prompt', 'Hello from the Transloadit Node CLI intents test.', '--provider', 'aws'], + outputPath: 'text-speak.mp3', + verifier: 'mp3', + }, + '/video/thumbs': { + args: ['--input', '@fixture/input.mp4'], + outputPath: 'video-thumbs', + verifier: 'video-thumbs', + }, + 'builtin/encode-hls-video@latest': { + args: ['--input', '@fixture/input.mp4'], + outputPath: 'video-encode-hls', + verifier: 'video-encode-hls', + }, +} + +export const intentSmokeCases = intentCatalog.map((intent) => { + const key = getIntentCatalogKey(intent) + const smokeCase = intentSmokeOverrides[key] + if (smokeCase == null) { + throw new Error(`Missing smoke-case definition for ${key}`) + } + + return { + ...smokeCase, + key, + paths: getIntentPaths(intent), + } +}) satisfies 
IntentSmokeCase[] diff --git a/packages/node/test/unit/cli/assemblies-create.test.ts b/packages/node/test/unit/cli/assemblies-create.test.ts new file mode 100644 index 00000000..74460c5e --- /dev/null +++ b/packages/node/test/unit/cli/assemblies-create.test.ts @@ -0,0 +1,1363 @@ +import { EventEmitter } from 'node:events' +import { mkdir, mkdtemp, readdir, readFile, rm, stat, utimes, writeFile } from 'node:fs/promises' +import { tmpdir } from 'node:os' +import path from 'node:path' +import { setTimeout as delay } from 'node:timers/promises' +import tty from 'node:tty' +import nock from 'nock' +import { afterEach, describe, expect, it, vi } from 'vitest' + +import { create } from '../../../src/cli/commands/assemblies.ts' +import OutputCtl from '../../../src/cli/OutputCtl.ts' +import { parseStepsInputJson } from '../../../src/cli/stepsInput.ts' + +const tempDirs: string[] = [] + +async function createTempDir(prefix: string): Promise { + const tempDir = await mkdtemp(path.join(tmpdir(), prefix)) + tempDirs.push(tempDir) + return tempDir +} + +function getLegacyRelativeInputPath(inputPath: string): string { + return path.relative(process.cwd(), inputPath).replace(/^(\.\.\/)+/, '') +} + +async function collectRelativeFiles(rootDir: string, currentDir = rootDir): Promise { + const entries = await readdir(currentDir, { withFileTypes: true }) + const files: string[] = [] + + for (const entry of entries) { + const fullPath = path.join(currentDir, entry.name) + if (entry.isDirectory()) { + files.push(...(await collectRelativeFiles(rootDir, fullPath))) + continue + } + + files.push(path.relative(rootDir, fullPath)) + } + + return files.sort() +} + +afterEach(async () => { + vi.restoreAllMocks() + vi.resetModules() + nock.cleanAll() + nock.abortPendingRequests() + + await Promise.all( + tempDirs.splice(0).map((tempDir) => rm(tempDir, { recursive: true, force: true })), + ) +}) + +describe('assemblies create', () => { + it('writes result bytes to stdout when output is -', async 
() => { + vi.spyOn(console, 'error').mockImplementation(() => {}) + + const output = new OutputCtl() + const stdoutWrite = vi.spyOn(process.stdout, 'write').mockImplementation(() => true) + const client = { + createAssembly: vi.fn().mockResolvedValue({ assembly_id: 'assembly-stdout' }), + awaitAssemblyCompletion: vi.fn().mockResolvedValue({ + ok: 'ASSEMBLY_COMPLETED', + results: { + generated: [{ url: 'http://downloads.test/stdout.txt', name: 'stdout.txt' }], + }, + }), + } + + nock('http://downloads.test').get('/stdout.txt').reply(200, 'stdout-contents') + + await expect( + create(output, client as never, { + inputs: [], + output: '-', + stepsData: { + generated: { + robot: '/image/generate', + result: true, + prompt: 'hello', + model: 'flux-schnell', + }, + }, + }), + ).resolves.toEqual( + expect.objectContaining({ + hasFailures: false, + }), + ) + + expect(stdoutWrite).toHaveBeenCalled() + expect(stdoutWrite.mock.calls.map(([chunk]) => String(chunk)).join('')).toContain( + 'stdout-contents', + ) + }) + + it('waits for stdout drain before finishing stdout downloads', async () => { + vi.spyOn(console, 'error').mockImplementation(() => {}) + + const output = new OutputCtl() + let resolved = false + const stdoutWrite = vi.spyOn(process.stdout, 'write').mockImplementation(() => false) + const client = { + createAssembly: vi.fn().mockResolvedValue({ assembly_id: 'assembly-stdout-drain' }), + awaitAssemblyCompletion: vi.fn().mockResolvedValue({ + ok: 'ASSEMBLY_COMPLETED', + results: { + generated: [{ url: 'http://downloads.test/stdout-drain.txt', name: 'stdout-drain.txt' }], + }, + }), + } + + nock('http://downloads.test').get('/stdout-drain.txt').reply(200, 'stdout-drain') + + const createPromise = create(output, client as never, { + inputs: [], + output: '-', + stepsData: { + generated: { + robot: '/image/generate', + result: true, + prompt: 'hello', + model: 'flux-schnell', + }, + }, + }).then(() => { + resolved = true + }) + + await delay(20) + 
expect(resolved).toBe(false) + expect(stdoutWrite).toHaveBeenCalled() + + process.stdout.emit('drain') + + await createPromise + expect(resolved).toBe(true) + }) + + it('returns result URLs for completed assemblies without local output', async () => { + vi.spyOn(console, 'error').mockImplementation(() => {}) + + const output = new OutputCtl() + const client = { + createAssembly: vi.fn().mockResolvedValue({ assembly_id: 'assembly-urls' }), + awaitAssemblyCompletion: vi.fn().mockResolvedValue({ + ok: 'ASSEMBLY_COMPLETED', + results: { + generated: [{ url: 'http://downloads.test/result.png', name: 'result.png' }], + }, + }), + } + + await expect( + create(output, client as never, { + inputs: [], + output: null, + stepsData: { + generated: { + robot: '/image/generate', + result: true, + prompt: 'hello', + model: 'flux-schnell', + }, + }, + }), + ).resolves.toEqual( + expect.objectContaining({ + hasFailures: false, + resultUrls: [ + { + assemblyId: 'assembly-urls', + step: 'generated', + name: 'result.png', + url: 'http://downloads.test/result.png', + }, + ], + }), + ) + }) + + it('rejects stdout output when an assembly returns multiple files', async () => { + vi.spyOn(console, 'error').mockImplementation(() => {}) + const stdoutWrite = vi.spyOn(process.stdout, 'write').mockImplementation(() => true) + + const output = new OutputCtl() + const client = { + createAssembly: vi.fn().mockResolvedValue({ assembly_id: 'assembly-stdout-multi' }), + awaitAssemblyCompletion: vi.fn().mockResolvedValue({ + ok: 'ASSEMBLY_COMPLETED', + results: { + generated: [ + { url: 'http://downloads.test/stdout-a.txt', name: 'a.txt' }, + { url: 'http://downloads.test/stdout-b.txt', name: 'b.txt' }, + ], + }, + }), + } + + nock('http://downloads.test').get('/stdout-a.txt').reply(200, 'stdout-a') + nock('http://downloads.test').get('/stdout-b.txt').reply(200, 'stdout-b') + + await expect( + create(output, client as never, { + inputs: [], + output: '-', + stepsData: { + generated: { + robot: 
'/image/generate', + result: true, + prompt: 'hello', + model: 'flux-schnell', + }, + }, + }), + ).resolves.toEqual( + expect.objectContaining({ + hasFailures: true, + }), + ) + + expect(stdoutWrite).not.toHaveBeenCalled() + }) + + it('rejects file outputs when an assembly returns multiple files', async () => { + vi.spyOn(console, 'error').mockImplementation(() => {}) + + const tempDir = await createTempDir('transloadit-file-output-multi-') + const outputPath = path.join(tempDir, 'result.txt') + + const output = new OutputCtl() + const client = { + createAssembly: vi.fn().mockResolvedValue({ assembly_id: 'assembly-file-multi' }), + awaitAssemblyCompletion: vi.fn().mockResolvedValue({ + ok: 'ASSEMBLY_COMPLETED', + results: { + generated: [ + { url: 'http://downloads.test/result-a.txt', name: 'a.txt' }, + { url: 'http://downloads.test/result-b.txt', name: 'b.txt' }, + ], + }, + }), + } + + nock('http://downloads.test').get('/result-a.txt').reply(200, 'result-a') + nock('http://downloads.test').get('/result-b.txt').reply(200, 'result-b') + + await expect( + create(output, client as never, { + inputs: [], + output: outputPath, + stepsData: { + generated: { + robot: '/image/generate', + result: true, + prompt: 'hello', + model: 'flux-schnell', + }, + }, + }), + ).resolves.toEqual( + expect.objectContaining({ + hasFailures: true, + }), + ) + + await expect(stat(outputPath)).rejects.toThrow() + }) + + it('supports bundled single-assembly outputs written to a file path', async () => { + vi.spyOn(console, 'error').mockImplementation(() => {}) + + const tempDir = await createTempDir('transloadit-bundle-') + const inputA = path.join(tempDir, 'a.txt') + const inputB = path.join(tempDir, 'b.txt') + const outputPath = path.join(tempDir, 'bundle.zip') + + await writeFile(inputA, 'a') + await writeFile(inputB, 'b') + + const output = new OutputCtl() + const client = { + createAssembly: vi.fn().mockResolvedValue({ assembly_id: 'assembly-1' }), + awaitAssemblyCompletion: 
vi.fn().mockResolvedValue({ + ok: 'ASSEMBLY_COMPLETED', + results: { + compressed: [{ url: 'http://downloads.test/bundle.zip', name: 'bundle.zip' }], + }, + }), + } + + nock('http://downloads.test').get('/bundle.zip').reply(200, 'bundle-contents') + + await expect( + create(output, client as never, { + inputs: [inputA, inputB], + output: outputPath, + singleAssembly: true, + stepsData: { + compressed: { + robot: '/file/compress', + result: true, + use: { + steps: [':original'], + bundle_steps: true, + }, + }, + }, + }), + ).resolves.toEqual( + expect.objectContaining({ + hasFailures: false, + }), + ) + + expect(client.createAssembly).toHaveBeenCalledTimes(1) + expect(await readFile(outputPath, 'utf8')).toBe('bundle-contents') + }) + + it('runs valid inputless single-assembly steps instead of no-oping', async () => { + vi.spyOn(console, 'error').mockImplementation(() => {}) + + const tempDir = await createTempDir('transloadit-inputless-single-assembly-') + const outputPath = path.join(tempDir, 'generated.png') + + const output = new OutputCtl() + const client = { + createAssembly: vi.fn().mockResolvedValue({ assembly_id: 'assembly-inputless-single' }), + awaitAssemblyCompletion: vi.fn().mockResolvedValue({ + ok: 'ASSEMBLY_COMPLETED', + results: { + generated: [{ url: 'http://downloads.test/generated.png', name: 'generated.png' }], + }, + }), + } + + nock('http://downloads.test').get('/generated.png').reply(200, 'image-bytes') + + await create(output, client as never, { + inputs: [], + output: outputPath, + singleAssembly: true, + stepsData: { + generated: { + robot: '/image/generate', + result: true, + prompt: 'hello', + model: 'google/nano-banana', + }, + }, + }) + + expect(client.createAssembly).toHaveBeenCalledTimes(1) + expect(await readFile(outputPath, 'utf8')).toBe('image-bytes') + }) + + it('runs valid inputless single-assembly steps when --out is a directory', async () => { + vi.spyOn(console, 'error').mockImplementation(() => {}) + + const tempDir = await 
createTempDir('transloadit-inputless-single-assembly-dir-') + const outputDir = path.join(tempDir, 'out') + await mkdir(outputDir, { recursive: true }) + + const output = new OutputCtl() + const client = { + createAssembly: vi.fn().mockResolvedValue({ assembly_id: 'assembly-inputless-single-dir' }), + awaitAssemblyCompletion: vi.fn().mockResolvedValue({ + ok: 'ASSEMBLY_COMPLETED', + results: { + generated: [ + { url: 'http://downloads.test/generated-dir.png', name: 'generated-dir.png' }, + ], + }, + }), + } + + nock('http://downloads.test').get('/generated-dir.png').reply(200, 'dir-image-bytes') + + await create(output, client as never, { + inputs: [], + output: outputDir, + outputMode: 'directory', + singleAssembly: true, + stepsData: { + generated: { + robot: '/image/generate', + result: true, + prompt: 'hello', + model: 'google/nano-banana', + }, + }, + }) + + expect(client.createAssembly).toHaveBeenCalledTimes(1) + expect(await readFile(path.join(outputDir, 'generated-dir.png'), 'utf8')).toBe( + 'dir-image-bytes', + ) + }) + + it('returns normalized step data from steps input parsing', () => { + const parsed = parseStepsInputJson( + JSON.stringify({ + waveform: { + robot: '/audio/waveform', + use: ':original', + result: true, + style: 1, + }, + }), + ) + + expect(parsed).toEqual({ + waveform: { + robot: '/audio/waveform', + use: ':original', + result: true, + style: 'v1', + }, + }) + }) + + it('rejects invalid steps files before calling the API', async () => { + vi.spyOn(console, 'error').mockImplementation(() => {}) + + const tempDir = await createTempDir('transloadit-invalid-steps-') + const stepsPath = path.join(tempDir, 'steps.json') + + await writeFile( + stepsPath, + JSON.stringify({ + generated: { + robot: '/image/generate', + prompt: 123, + model: 'google/nano-banana', + }, + }), + ) + + const output = new OutputCtl() + const client = { + createAssembly: vi.fn(), + awaitAssemblyCompletion: vi.fn(), + } + + await expect( + create(output, client as never, 
{ + inputs: [], + output: path.join(tempDir, 'result.png'), + steps: stepsPath, + }), + ).rejects.toThrow(/Invalid steps format/) + + expect(client.createAssembly).not.toHaveBeenCalled() + }) + + it('keeps unchanged inputs in single-assembly rebuilds when one input is stale', async () => { + vi.spyOn(console, 'error').mockImplementation(() => {}) + + const tempDir = await createTempDir('transloadit-bundle-stale-') + const inputA = path.join(tempDir, 'a.txt') + const inputB = path.join(tempDir, 'b.txt') + const outputPath = path.join(tempDir, 'bundle.zip') + + await writeFile(inputA, 'a') + await writeFile(inputB, 'b') + await writeFile(outputPath, 'old-bundle') + + const baseTime = new Date('2026-01-01T00:00:00.000Z') + const outputTime = new Date('2026-01-01T00:00:10.000Z') + const changedInputTime = new Date('2026-01-01T00:00:20.000Z') + + await utimes(inputA, changedInputTime, changedInputTime) + await utimes(inputB, baseTime, baseTime) + await utimes(outputPath, outputTime, outputTime) + + const output = new OutputCtl() + const client = { + createAssembly: vi.fn().mockResolvedValue({ assembly_id: 'assembly-stale-bundle' }), + awaitAssemblyCompletion: vi.fn().mockResolvedValue({ + ok: 'ASSEMBLY_COMPLETED', + results: { + compressed: [{ url: 'http://downloads.test/bundle.zip', name: 'bundle.zip' }], + }, + }), + } + + nock('http://downloads.test').get('/bundle.zip').reply(200, 'bundle-contents') + + await create(output, client as never, { + inputs: [inputA, inputB], + output: outputPath, + singleAssembly: true, + stepsData: { + compressed: { + robot: '/file/compress', + result: true, + use: { + steps: [':original'], + bundle_steps: true, + }, + }, + }, + }) + + expect(client.createAssembly).toHaveBeenCalledTimes(1) + const files = client.createAssembly.mock.calls[0]?.[0]?.files + expect(Object.keys(files ?? 
{}).sort()).toEqual(['a.txt', 'b.txt']) + }) + + it('skips bundled single-assembly runs when the output is newer than every input', async () => { + vi.spyOn(console, 'error').mockImplementation(() => {}) + + const tempDir = await createTempDir('transloadit-bundle-skip-stale-') + const inputA = path.join(tempDir, 'a.txt') + const inputB = path.join(tempDir, 'b.txt') + const outputPath = path.join(tempDir, 'bundle.zip') + + await writeFile(inputA, 'a') + await writeFile(inputB, 'b') + await writeFile(outputPath, 'existing-bundle') + + const inputTime = new Date('2026-01-01T00:00:00.000Z') + const outputTime = new Date('2026-01-01T00:00:10.000Z') + + await utimes(inputA, inputTime, inputTime) + await utimes(inputB, inputTime, inputTime) + await utimes(outputPath, outputTime, outputTime) + + const output = new OutputCtl() + const client = { + createAssembly: vi.fn(), + awaitAssemblyCompletion: vi.fn(), + } + + await expect( + create(output, client as never, { + inputs: [inputA, inputB], + output: outputPath, + singleAssembly: true, + stepsData: { + compressed: { + robot: '/file/compress', + result: true, + use: { + steps: [':original'], + bundle_steps: true, + }, + }, + }, + }), + ).resolves.toEqual( + expect.objectContaining({ + hasFailures: false, + results: [], + }), + ) + + expect(client.createAssembly).not.toHaveBeenCalled() + expect(await readFile(outputPath, 'utf8')).toBe('existing-bundle') + }) + + it('reruns single-input bundled assemblies when the input is newer than the output', async () => { + vi.spyOn(console, 'error').mockImplementation(() => {}) + + const tempDir = await createTempDir('transloadit-bundle-single-input-stale-') + const inputPath = path.join(tempDir, 'a.txt') + const outputPath = path.join(tempDir, 'bundle.zip') + + await writeFile(inputPath, 'a') + await writeFile(outputPath, 'existing-bundle') + + const outputTime = new Date('2026-01-01T00:00:10.000Z') + const inputTime = new Date('2026-01-01T00:00:20.000Z') + + await utimes(inputPath, 
inputTime, inputTime) + await utimes(outputPath, outputTime, outputTime) + + const output = new OutputCtl() + const client = { + createAssembly: vi.fn().mockResolvedValue({ assembly_id: 'assembly-single-input-stale' }), + awaitAssemblyCompletion: vi.fn().mockResolvedValue({ + ok: 'ASSEMBLY_COMPLETED', + results: { + compressed: [{ url: 'http://downloads.test/bundle-single.zip', name: 'bundle.zip' }], + }, + }), + } + + nock('http://downloads.test').get('/bundle-single.zip').reply(200, 'fresh-bundle') + + await create(output, client as never, { + inputs: [inputPath], + output: outputPath, + singleAssembly: true, + stepsData: { + compressed: { + robot: '/file/compress', + result: true, + use: { + steps: [':original'], + bundle_steps: true, + }, + }, + }, + }) + + expect(client.createAssembly).toHaveBeenCalledTimes(1) + expect(client.createAssembly.mock.calls[0]?.[0]?.files).toEqual({ + 'a.txt': inputPath, + }) + expect(await readFile(outputPath, 'utf8')).toBe('fresh-bundle') + }) + + it('preserves the original filename for per-file uploads', async () => { + vi.spyOn(console, 'error').mockImplementation(() => {}) + + const tempDir = await createTempDir('transloadit-file-upload-name-') + const inputPath = path.join(tempDir, 'README.md') + const outputPath = path.join(tempDir, 'README.pdf') + + await writeFile(inputPath, '# Hello') + + const output = new OutputCtl() + const client = { + createAssembly: vi.fn().mockResolvedValue({ assembly_id: 'assembly-readme-md' }), + awaitAssemblyCompletion: vi.fn().mockResolvedValue({ + ok: 'ASSEMBLY_COMPLETED', + results: { + converted: [{ url: 'http://downloads.test/README.pdf', name: 'README.pdf' }], + }, + }), + } + + nock('http://downloads.test').get('/README.pdf').reply(200, 'pdf-contents') + + await create(output, client as never, { + inputs: [inputPath], + output: outputPath, + stepsData: { + converted: { + robot: '/document/convert', + result: true, + use: ':original', + format: 'pdf', + }, + }, + }) + + 
expect(client.createAssembly).toHaveBeenCalledTimes(1) + expect(client.createAssembly.mock.calls[0]?.[0]?.files).toEqual({ + in: inputPath, + }) + expect(client.createAssembly.mock.calls[0]?.[0]?.uploads).toBeUndefined() + }) + + it('rewrites existing bundled outputs on single-assembly reruns', async () => { + vi.spyOn(console, 'error').mockImplementation(() => {}) + + const tempDir = await createTempDir('transloadit-bundle-rerun-') + const inputA = path.join(tempDir, 'a.txt') + const inputB = path.join(tempDir, 'b.txt') + const outputPath = path.join(tempDir, 'bundle.zip') + + await writeFile(inputA, 'a') + await writeFile(inputB, 'b') + await writeFile(outputPath, 'old-bundle') + + const output = new OutputCtl() + const client = { + createAssembly: vi.fn().mockResolvedValue({ assembly_id: 'assembly-rerun-bundle' }), + awaitAssemblyCompletion: vi.fn().mockResolvedValue({ + ok: 'ASSEMBLY_COMPLETED', + results: { + compressed: [{ url: 'http://downloads.test/bundle-rerun.zip', name: 'bundle.zip' }], + }, + }), + } + + nock('http://downloads.test').get('/bundle-rerun.zip').reply(200, 'fresh-bundle') + + await expect( + create(output, client as never, { + inputs: [inputA, inputB], + output: outputPath, + singleAssembly: true, + stepsData: { + compressed: { + robot: '/file/compress', + result: true, + use: { + steps: [':original'], + bundle_steps: true, + }, + }, + }, + }), + ).resolves.toEqual( + expect.objectContaining({ + hasFailures: false, + }), + ) + + expect(await readFile(outputPath, 'utf8')).toBe('fresh-bundle') + }) + + it('does not let older watch assemblies overwrite newer results', async () => { + vi.spyOn(console, 'error').mockImplementation(() => {}) + vi.resetModules() + + class FakeWatcher extends EventEmitter { + close(): void { + this.emit('close') + } + } + + const fakeWatcher = new FakeWatcher() + vi.doMock('node-watch', () => { + return { + default: vi.fn(() => fakeWatcher), + } + }) + + const { create: createWithWatch } = await 
import('../../../src/cli/commands/assemblies.ts') + + const tempDir = await createTempDir('transloadit-watch-') + const inputPath = path.join(tempDir, 'clip.mp4') + const outputPath = path.join(tempDir, 'thumb.jpg') + + await writeFile(inputPath, 'video-v1') + await writeFile(outputPath, 'existing-thumb') + + const baseTime = new Date('2026-01-01T00:00:00.000Z') + const outputTime = new Date('2026-01-01T00:00:10.000Z') + const firstChangeTime = new Date('2026-01-01T00:00:20.000Z') + const secondChangeTime = new Date('2026-01-01T00:00:30.000Z') + + await utimes(inputPath, baseTime, baseTime) + await utimes(outputPath, outputTime, outputTime) + + const output = new OutputCtl() + const client = { + createAssembly: vi + .fn() + .mockResolvedValueOnce({ assembly_id: 'assembly-old' }) + .mockResolvedValueOnce({ assembly_id: 'assembly-new' }), + awaitAssemblyCompletion: vi.fn(async (assemblyId: string) => { + if (assemblyId === 'assembly-old') { + await delay(80) + return { + ok: 'ASSEMBLY_COMPLETED', + results: { + thumbs: [{ url: 'http://downloads.test/old.jpg', name: 'old.jpg' }], + }, + } + } + + await delay(10) + return { + ok: 'ASSEMBLY_COMPLETED', + results: { + thumbs: [{ url: 'http://downloads.test/new.jpg', name: 'new.jpg' }], + }, + } + }), + } + + nock('http://downloads.test').get('/old.jpg').reply(200, 'old-result') + nock('http://downloads.test').get('/new.jpg').reply(200, 'new-result') + + const createPromise = createWithWatch(output, client as never, { + inputs: [inputPath], + output: outputPath, + watch: true, + concurrency: 2, + stepsData: { + thumbs: { + robot: '/video/thumbs', + result: true, + use: ':original', + }, + }, + }) + + await delay(20) + await writeFile(inputPath, 'video-v2') + await utimes(inputPath, firstChangeTime, firstChangeTime) + fakeWatcher.emit('change', 'update', inputPath) + + await delay(5) + await writeFile(inputPath, 'video-v3') + await utimes(inputPath, secondChangeTime, secondChangeTime) + fakeWatcher.emit('change', 'update', 
inputPath) + + await delay(20) + fakeWatcher.close() + + await expect(createPromise).resolves.toEqual( + expect.objectContaining({ + hasFailures: false, + }), + ) + + expect(await readFile(outputPath, 'utf8')).toBe('new-result') + }) + + it('does not return stale watched result URLs that lose the race', async () => { + vi.spyOn(console, 'error').mockImplementation(() => {}) + vi.resetModules() + + class FakeWatcher extends EventEmitter { + close(): void { + this.emit('close') + } + } + + const fakeWatcher = new FakeWatcher() + vi.doMock('node-watch', () => { + return { + default: vi.fn(() => fakeWatcher), + } + }) + + const { create: createWithWatch } = await import('../../../src/cli/commands/assemblies.ts') + + const tempDir = await createTempDir('transloadit-watch-urls-') + const inputPath = path.join(tempDir, 'clip.mp4') + const outputPath = path.join(tempDir, 'thumb.jpg') + + await writeFile(inputPath, 'video-v1') + await writeFile(outputPath, 'existing-thumb') + + const baseTime = new Date('2026-01-01T00:00:00.000Z') + const outputTime = new Date('2026-01-01T00:00:10.000Z') + const firstChangeTime = new Date('2026-01-01T00:00:20.000Z') + const secondChangeTime = new Date('2026-01-01T00:00:30.000Z') + + await utimes(inputPath, baseTime, baseTime) + await utimes(outputPath, outputTime, outputTime) + + const output = new OutputCtl() + const client = { + createAssembly: vi + .fn() + .mockResolvedValueOnce({ assembly_id: 'assembly-old' }) + .mockResolvedValueOnce({ assembly_id: 'assembly-new' }), + awaitAssemblyCompletion: vi.fn(async (assemblyId: string) => { + if (assemblyId === 'assembly-old') { + await delay(80) + return { + ok: 'ASSEMBLY_COMPLETED', + results: { + thumbs: [{ url: 'http://downloads.test/old.jpg', name: 'old.jpg' }], + }, + } + } + + await delay(10) + return { + ok: 'ASSEMBLY_COMPLETED', + results: { + thumbs: [{ url: 'http://downloads.test/new.jpg', name: 'new.jpg' }], + }, + } + }), + } + + 
nock('http://downloads.test').get('/old.jpg').reply(200, 'old-result') + nock('http://downloads.test').get('/new.jpg').reply(200, 'new-result') + + const createPromise = createWithWatch(output, client as never, { + inputs: [inputPath], + output: outputPath, + watch: true, + concurrency: 2, + stepsData: { + thumbs: { + robot: '/video/thumbs', + result: true, + use: ':original', + }, + }, + }) + + await delay(20) + await writeFile(inputPath, 'video-v2') + await utimes(inputPath, firstChangeTime, firstChangeTime) + fakeWatcher.emit('change', 'update', inputPath) + + await delay(5) + await writeFile(inputPath, 'video-v3') + await utimes(inputPath, secondChangeTime, secondChangeTime) + fakeWatcher.emit('change', 'update', inputPath) + + await delay(20) + fakeWatcher.close() + + await expect(createPromise).resolves.toEqual( + expect.objectContaining({ + hasFailures: false, + resultUrls: [ + { + assemblyId: 'assembly-new', + step: 'thumbs', + name: 'new.jpg', + url: 'http://downloads.test/new.jpg', + }, + ], + }), + ) + }) + + it('does not let a newer watch job get skipped after an older watch result updates the output', async () => { + vi.spyOn(console, 'error').mockImplementation(() => {}) + vi.resetModules() + + class FakeWatcher extends EventEmitter { + close(): void { + this.emit('close') + } + } + + const fakeWatcher = new FakeWatcher() + vi.doMock('node-watch', () => { + return { + default: vi.fn(() => fakeWatcher), + } + }) + + const { create: createWithWatch } = await import('../../../src/cli/commands/assemblies.ts') + + const tempDir = await createTempDir('transloadit-watch-newer-skipped-') + const inputPath = path.join(tempDir, 'clip.mp4') + const outputPath = path.join(tempDir, 'thumb.jpg') + + await writeFile(inputPath, 'video-v1') + await writeFile(outputPath, 'existing-thumb') + + const baseTime = new Date('2026-01-01T00:00:00.000Z') + const outputTime = new Date('2026-01-01T00:00:10.000Z') + const firstChangeTime = new Date('2026-01-01T00:00:20.000Z') + 
const secondChangeTime = new Date('2026-01-01T00:00:30.000Z') + + await utimes(inputPath, baseTime, baseTime) + await utimes(outputPath, outputTime, outputTime) + + const output = new OutputCtl() + const client = { + createAssembly: vi + .fn() + .mockResolvedValueOnce({ assembly_id: 'assembly-old-fast' }) + .mockResolvedValueOnce({ assembly_id: 'assembly-new-slow' }), + awaitAssemblyCompletion: vi.fn(async (assemblyId: string) => { + if (assemblyId === 'assembly-old-fast') { + await delay(40) + return { + ok: 'ASSEMBLY_COMPLETED', + results: { + thumbs: [{ url: 'http://downloads.test/old-fast.jpg', name: 'old-fast.jpg' }], + }, + } + } + + await delay(140) + return { + ok: 'ASSEMBLY_COMPLETED', + results: { + thumbs: [{ url: 'http://downloads.test/new-slow.jpg', name: 'new-slow.jpg' }], + }, + } + }), + } + + nock('http://downloads.test').get('/old-fast.jpg').reply(200, 'old-fast-result') + nock('http://downloads.test').get('/new-slow.jpg').reply(200, 'new-slow-result') + + const createPromise = createWithWatch(output, client as never, { + inputs: [inputPath], + output: outputPath, + watch: true, + concurrency: 2, + stepsData: { + thumbs: { + robot: '/video/thumbs', + result: true, + use: ':original', + }, + }, + }) + + await delay(20) + await writeFile(inputPath, 'video-v2') + await utimes(inputPath, firstChangeTime, firstChangeTime) + fakeWatcher.emit('change', 'update', inputPath) + + await delay(5) + await writeFile(inputPath, 'video-v3') + await utimes(inputPath, secondChangeTime, secondChangeTime) + fakeWatcher.emit('change', 'update', inputPath) + + await delay(20) + fakeWatcher.close() + + await expect(createPromise).resolves.toEqual( + expect.objectContaining({ + hasFailures: false, + }), + ) + + expect(await readFile(outputPath, 'utf8')).toBe('new-slow-result') + }) + + it('does not try to delete /dev/stdin after stdin processing', async () => { + vi.spyOn(console, 'error').mockImplementation(() => {}) + vi.spyOn(process.stdout, 
'write').mockImplementation(() => true) + vi.spyOn(tty, 'isatty').mockReturnValue(false) + + const tempDir = await createTempDir('transloadit-stdin-') + const outputPath = path.join(tempDir, 'waveform.png') + + const output = new OutputCtl() + const client = { + createAssembly: vi.fn().mockResolvedValue({ assembly_id: 'assembly-stdin' }), + awaitAssemblyCompletion: vi.fn().mockResolvedValue({ + ok: 'ASSEMBLY_COMPLETED', + results: { + waveform: [{ url: 'http://downloads.test/stdin-waveform.png', name: 'waveform.png' }], + }, + }), + } + + nock('http://downloads.test').get('/stdin-waveform.png').reply(200, 'waveform') + + await expect( + create(output, client as never, { + inputs: ['-'], + output: outputPath, + del: true, + stepsData: { + waveform: { + robot: '/audio/waveform', + result: true, + use: ':original', + }, + }, + }), + ).resolves.toEqual( + expect.objectContaining({ + hasFailures: false, + }), + ) + + expect(await readFile(outputPath, 'utf8')).toBe('waveform') + }) + + it('surfaces output plan failures through the normal error path', async () => { + vi.spyOn(console, 'error').mockImplementation(() => {}) + vi.spyOn(process.stdout, 'write').mockImplementation(() => true) + + const tempDir = await createTempDir('transloadit-output-plan-failure-') + const outputDir = path.join(tempDir, 'out') + await mkdir(outputDir, { recursive: true }) + + const output = new OutputCtl() + const client = { + createAssembly: vi.fn(), + awaitAssemblyCompletion: vi.fn(), + } + + await expect( + create(output, client as never, { + inputs: ['-'], + output: outputDir, + outputMode: 'directory', + stepsData: { + waveform: { + robot: '/audio/waveform', + result: true, + use: ':original', + }, + }, + }), + ).rejects.toThrow('You must provide an input to output to a directory') + + expect(client.createAssembly).not.toHaveBeenCalled() + }) + + it('writes single-input directory outputs using result filenames', async () => { + vi.spyOn(console, 'error').mockImplementation(() => {}) + + 
const tempDir = await createTempDir('transloadit-outdir-') + const inputPath = path.join(tempDir, 'clip.mp4') + const outputDir = path.join(tempDir, 'thumbs') + + await writeFile(inputPath, 'video') + await mkdir(outputDir, { recursive: true }) + + const output = new OutputCtl() + const client = { + createAssembly: vi.fn().mockResolvedValue({ assembly_id: 'assembly-2' }), + awaitAssemblyCompletion: vi.fn().mockResolvedValue({ + ok: 'ASSEMBLY_COMPLETED', + results: { + thumbs: [ + { url: 'http://downloads.test/one.jpg', name: 'one.jpg' }, + { url: 'http://downloads.test/two.jpg', name: 'two.jpg' }, + ], + }, + }), + } + + nock('http://downloads.test').get('/one.jpg').reply(200, 'one') + nock('http://downloads.test').get('/two.jpg').reply(200, 'two') + + await expect( + create( + output, + client as never, + { + inputs: [inputPath], + output: outputDir, + stepsData: { + thumbs: { + robot: '/video/thumbs', + result: true, + use: ':original', + }, + }, + outputMode: 'directory', + } as never, + ), + ).resolves.toEqual( + expect.objectContaining({ + hasFailures: false, + }), + ) + + expect(await readFile(path.join(outputDir, 'one.jpg'), 'utf8')).toBe('one') + expect(await readFile(path.join(outputDir, 'two.jpg'), 'utf8')).toBe('two') + }) + + it('keeps duplicate sanitized result filenames from overwriting each other', async () => { + vi.spyOn(console, 'error').mockImplementation(() => {}) + + const tempDir = await createTempDir('transloadit-dupe-results-') + const inputPath = path.join(tempDir, 'clip.mp4') + const outputDir = path.join(tempDir, 'thumbs') + + await writeFile(inputPath, 'video') + await mkdir(outputDir, { recursive: true }) + + const output = new OutputCtl() + const client = { + createAssembly: vi.fn().mockResolvedValue({ assembly_id: 'assembly-dupe-results' }), + awaitAssemblyCompletion: vi.fn().mockResolvedValue({ + ok: 'ASSEMBLY_COMPLETED', + results: { + thumbs: [ + { url: 'http://downloads.test/dupe-a.jpg', name: 'thumb.jpg' }, + { url: 
'http://downloads.test/dupe-b.jpg', name: 'thumb.jpg' }, + ], + }, + }), + } + + nock('http://downloads.test').get('/dupe-a.jpg').reply(200, 'first-thumb') + nock('http://downloads.test').get('/dupe-b.jpg').reply(200, 'second-thumb') + + await expect( + create(output, client as never, { + inputs: [inputPath], + output: outputDir, + outputMode: 'directory', + stepsData: { + thumbs: { + robot: '/video/thumbs', + result: true, + use: ':original', + }, + }, + }), + ).resolves.toEqual( + expect.objectContaining({ + hasFailures: false, + }), + ) + + expect(await readFile(path.join(outputDir, 'thumb.jpg'), 'utf8')).toBe('first-thumb') + expect(await readFile(path.join(outputDir, 'thumb__1.jpg'), 'utf8')).toBe('second-thumb') + }) + + it('preserves legacy step-directory layout for generic directory outputs', async () => { + vi.spyOn(console, 'error').mockImplementation(() => {}) + + const tempDir = await createTempDir('transloadit-legacy-outdir-') + const inputPath = path.join(tempDir, 'clip.mp4') + const outputDir = path.join(tempDir, 'thumbs') + + await writeFile(inputPath, 'video') + await mkdir(outputDir, { recursive: true }) + + const output = new OutputCtl() + const client = { + createAssembly: vi.fn().mockResolvedValue({ assembly_id: 'assembly-legacy-dir' }), + awaitAssemblyCompletion: vi.fn().mockResolvedValue({ + ok: 'ASSEMBLY_COMPLETED', + results: { + thumbs: [ + { url: 'http://downloads.test/one.jpg', name: 'one.jpg' }, + { url: 'http://downloads.test/two.jpg', name: 'two.jpg' }, + ], + }, + }), + } + + nock('http://downloads.test').get('/one.jpg').reply(200, 'one') + nock('http://downloads.test').get('/two.jpg').reply(200, 'two') + + await create( + output, + client as never, + { + inputs: [inputPath], + output: outputDir, + stepsData: { + thumbs: { + robot: '/video/thumbs', + result: true, + use: ':original', + }, + }, + } as never, + ) + + const legacyRelative = getLegacyRelativeInputPath(inputPath) + const legacyBaseDir = 
path.join(path.dirname(legacyRelative), path.parse(legacyRelative).name) + + expect(await collectRelativeFiles(outputDir)).toEqual([ + path.join(legacyBaseDir, 'thumbs', 'one.jpg'), + path.join(legacyBaseDir, 'thumbs', 'two.jpg'), + ]) + }) + + it('uses the actual result filename for single-result directory outputs', async () => { + vi.spyOn(console, 'error').mockImplementation(() => {}) + + const tempDir = await createTempDir('transloadit-single-result-outdir-') + const inputPath = path.join(tempDir, 'archive.zip') + const outputDir = path.join(tempDir, 'extracted') + + await writeFile(inputPath, 'zip-data') + await mkdir(outputDir, { recursive: true }) + + const output = new OutputCtl() + const client = { + createAssembly: vi.fn().mockResolvedValue({ assembly_id: 'assembly-3' }), + awaitAssemblyCompletion: vi.fn().mockResolvedValue({ + ok: 'ASSEMBLY_COMPLETED', + results: { + decompressed: [{ url: 'http://downloads.test/input.txt', name: 'input.txt' }], + }, + }), + } + + nock('http://downloads.test').get('/input.txt').reply(200, 'hello') + + await expect( + create(output, client as never, { + inputs: [inputPath], + output: outputDir, + stepsData: { + decompressed: { + robot: '/file/decompress', + result: true, + use: ':original', + }, + }, + outputMode: 'directory', + }), + ).resolves.toEqual( + expect.objectContaining({ + hasFailures: false, + }), + ) + + expect(await readFile(path.join(outputDir, 'input.txt'), 'utf8')).toBe('hello') + }) + + it('preserves mapped out paths for legacy single-result directory outputs', async () => { + vi.spyOn(console, 'error').mockImplementation(() => {}) + + const tempDir = await createTempDir('transloadit-legacy-single-result-') + const inputPath = path.join(tempDir, 'archive.zip') + const outputDir = path.join(tempDir, 'extracted') + + await writeFile(inputPath, 'zip-data') + await mkdir(outputDir, { recursive: true }) + + const output = new OutputCtl() + const client = { + createAssembly: vi.fn().mockResolvedValue({ 
assembly_id: 'assembly-legacy-single-result' }), + awaitAssemblyCompletion: vi.fn().mockResolvedValue({ + ok: 'ASSEMBLY_COMPLETED', + results: { + decompressed: [{ url: 'http://downloads.test/input.txt', name: 'input.txt' }], + }, + }), + } + + nock('http://downloads.test').get('/input.txt').reply(200, 'hello') + + await create(output, client as never, { + inputs: [inputPath], + output: outputDir, + stepsData: { + decompressed: { + robot: '/file/decompress', + result: true, + use: ':original', + }, + }, + }) + + expect(await collectRelativeFiles(outputDir)).toEqual([getLegacyRelativeInputPath(inputPath)]) + }) + + it('does not create an empty output file when assembly creation fails', async () => { + vi.spyOn(console, 'error').mockImplementation(() => {}) + + const tempDir = await createTempDir('transloadit-failed-create-') + const inputPath = path.join(tempDir, 'image.jpg') + const outputPath = path.join(tempDir, 'resized.jpg') + + await writeFile(inputPath, 'image-data') + + const output = new OutputCtl() + const client = { + createAssembly: vi.fn().mockRejectedValue(new Error('boom')), + } + + await expect( + create(output, client as never, { + inputs: [inputPath], + output: outputPath, + stepsData: { + resized: { + robot: '/image/resize', + result: true, + use: ':original', + width: 200, + }, + }, + }), + ).resolves.toEqual( + expect.objectContaining({ + hasFailures: true, + }), + ) + + await expect(stat(outputPath)).rejects.toMatchObject({ + code: 'ENOENT', + }) + }) +}) diff --git a/packages/node/test/unit/cli/intents.test.ts b/packages/node/test/unit/cli/intents.test.ts new file mode 100644 index 00000000..56e8d3cb --- /dev/null +++ b/packages/node/test/unit/cli/intents.test.ts @@ -0,0 +1,1310 @@ +import { mkdir, mkdtemp, readFile, rm, writeFile } from 'node:fs/promises' +import { tmpdir } from 'node:os' +import path from 'node:path' +import nock from 'nock' +import { afterEach, describe, expect, it, vi } from 'vitest' +import { z } from 'zod' + +import * as 
assembliesCommands from '../../../src/cli/commands/assemblies.ts' +import { + findIntentDefinitionByPaths, + getIntentPaths, + getIntentResultStepName, + intentCatalog, +} from '../../../src/cli/intentCommandSpecs.ts' +import { intentCommands } from '../../../src/cli/intentCommands.ts' +import { + coerceIntentFieldValue, + inferIntentFieldKind, + parseStringArrayValue, +} from '../../../src/cli/intentFields.ts' +import { prepareIntentInputs } from '../../../src/cli/intentRuntime.ts' +import OutputCtl from '../../../src/cli/OutputCtl.ts' +import { main } from '../../../src/cli.ts' +import { intentSmokeCases } from '../../support/intentSmokeCases.ts' + +const noopWrite = () => true +const tempDirs: string[] = [] + +const resetExitCode = () => { + process.exitCode = undefined +} + +async function createTempDir(prefix: string): Promise { + const tempDir = await mkdtemp(path.join(tmpdir(), prefix)) + tempDirs.push(tempDir) + return tempDir +} + +async function runIntentCommand( + args: string[], + createResult: Awaited> = { + resultUrls: [], + results: [], + hasFailures: false, + }, +): Promise<{ + createSpy: ReturnType> +}> { + vi.stubEnv('TRANSLOADIT_KEY', 'key') + vi.stubEnv('TRANSLOADIT_SECRET', 'secret') + + const createSpy = vi.spyOn(assembliesCommands, 'create').mockResolvedValue(createResult) + vi.spyOn(process.stdout, 'write').mockImplementation(noopWrite) + + await main(args) + + return { createSpy } +} + +function getIntentCommand(paths: string[]): (typeof intentCommands)[number] { + const command = intentCommands.find((candidate) => { + const candidatePaths = candidate.paths[0] + return candidatePaths != null && candidatePaths.join(' ') === paths.join(' ') + }) + + if (command == null) { + throw new Error(`No intent command found for ${paths.join(' ')}`) + } + + return command +} + +function getIntentStepName(paths: string[]): string { + const definition = findIntentDefinitionByPaths(paths) + if (definition == null || definition.kind !== 'robot') { + throw 
new Error(`No robot intent definition found for ${paths.join(' ')}`) + } + + const stepName = getIntentResultStepName(definition) + if (stepName == null) { + throw new Error(`No intent result step name found for ${paths.join(' ')}`) + } + + return stepName +} + +afterEach(() => { + vi.restoreAllMocks() + vi.unstubAllEnvs() + nock.cleanAll() + resetExitCode() + return Promise.all( + tempDirs.splice(0).map((tempDir) => rm(tempDir, { recursive: true, force: true })), + ) +}) + +describe('intent commands', () => { + it('routes image describe labels through /image/describe', async () => { + const { createSpy } = await runIntentCommand([ + 'image', + 'describe', + '--input', + 'hero.jpg', + '--fields', + 'labels', + '--out', + 'labels.json', + ]) + + expect(process.exitCode).toBeUndefined() + expect(createSpy).toHaveBeenCalledWith( + expect.any(OutputCtl), + expect.anything(), + expect.objectContaining({ + inputs: ['hero.jpg'], + output: 'labels.json', + stepsData: { + describe: expect.objectContaining({ + robot: '/image/describe', + use: ':original', + result: true, + provider: 'aws', + format: 'json', + granularity: 'list', + explicit_descriptions: false, + }), + }, + }), + ) + }) + + it('prints aligned result URLs without requiring --out', async () => { + const logSpy = vi.spyOn(console, 'log').mockImplementation(() => {}) + + const { createSpy } = await runIntentCommand( + ['image', 'describe', '--input', 'hero.jpg', '--fields', 'labels', '--print-urls'], + { + results: [], + hasFailures: false, + resultUrls: [ + { + assemblyId: 'assembly-1', + step: 'describe', + name: 'hero.json', + url: 'https://example.com/hero.json', + }, + ], + }, + ) + + expect(process.exitCode).toBeUndefined() + expect(createSpy).toHaveBeenCalledWith( + expect.any(OutputCtl), + expect.anything(), + expect.objectContaining({ + inputs: ['hero.jpg'], + output: null, + }), + ) + expect(logSpy).toHaveBeenCalledWith(expect.stringContaining('STEP')) + 
expect(logSpy).toHaveBeenCalledWith(expect.stringContaining('https://example.com/hero.json')) + }) + + it('prints machine-readable result URLs with --json', async () => { + const logSpy = vi.spyOn(console, 'log').mockImplementation(() => {}) + + await runIntentCommand( + ['--json', 'image', 'describe', '--input', 'hero.jpg', '--fields', 'labels', '--print-urls'], + { + results: [], + hasFailures: false, + resultUrls: [ + { + assemblyId: 'assembly-1', + step: 'describe', + name: 'hero.json', + url: 'https://example.com/hero.json', + }, + ], + }, + ) + + expect(logSpy).toHaveBeenCalledWith( + JSON.stringify({ + urls: [ + { + assemblyId: 'assembly-1', + step: 'describe', + name: 'hero.json', + url: 'https://example.com/hero.json', + }, + ], + }), + ) + }) + + it('routes image describe --for wordpress through /ai/chat with a schema', async () => { + const { createSpy } = await runIntentCommand([ + 'image', + 'describe', + '--input', + 'hero.jpg', + '--for', + 'wordpress', + '--out', + 'fields.json', + ]) + + expect(process.exitCode).toBeUndefined() + expect(createSpy).toHaveBeenCalledWith( + expect.any(OutputCtl), + expect.anything(), + expect.objectContaining({ + inputs: ['hero.jpg'], + output: 'fields.json', + stepsData: { + describe: expect.objectContaining({ + robot: '/ai/chat', + use: ':original', + result: true, + model: 'anthropic/claude-4-sonnet-20250514', + format: 'json', + return_messages: 'last', + test_credentials: true, + messages: expect.stringContaining('altText, title, caption, description'), + }), + }, + }), + ) + + const describeStep = createSpy.mock.calls[0]?.[2].stepsData?.describe + expect(describeStep).toBeDefined() + if (describeStep == null || typeof describeStep !== 'object') { + throw new Error('Missing describe step') + } + + const schema = JSON.parse(String((describeStep as Record).schema)) + expect(schema).toEqual({ + type: 'object', + additionalProperties: false, + required: ['altText', 'title', 'caption', 'description'], + properties: 
expect.objectContaining({ + altText: expect.objectContaining({ type: 'string' }), + title: expect.objectContaining({ type: 'string' }), + caption: expect.objectContaining({ type: 'string' }), + description: expect.objectContaining({ type: 'string' }), + }), + }) + }) + + it('rejects combining labels with authored image describe fields', async () => { + vi.stubEnv('TRANSLOADIT_KEY', 'key') + vi.stubEnv('TRANSLOADIT_SECRET', 'secret') + + const createSpy = vi.spyOn(assembliesCommands, 'create').mockResolvedValue({ + results: [], + hasFailures: false, + }) + vi.spyOn(process.stdout, 'write').mockImplementation(noopWrite) + + await main([ + 'image', + 'describe', + '--input', + 'hero.jpg', + '--fields', + 'labels,caption', + '--out', + 'fields.json', + ]) + + expect(process.exitCode).toBe(1) + expect(createSpy).not.toHaveBeenCalled() + }) + + it('rejects combining --fields labels with --for wordpress', async () => { + const { createSpy } = await runIntentCommand([ + 'image', + 'describe', + '--input', + 'hero.jpg', + '--fields', + 'labels', + '--for', + 'wordpress', + '--out', + 'fields.json', + ]) + + expect(process.exitCode).toBe(1) + expect(createSpy).not.toHaveBeenCalled() + }) + + it('maps image generate flags to /image/generate step parameters', async () => { + const { createSpy } = await runIntentCommand([ + 'image', + 'generate', + '--prompt', + 'A red bicycle in a studio', + '--model', + 'flux-schnell', + '--aspect-ratio', + '2:3', + '--out', + 'generated.png', + ]) + + expect(process.exitCode).toBeUndefined() + expect(createSpy).toHaveBeenCalledWith( + expect.any(OutputCtl), + expect.anything(), + expect.objectContaining({ + inputs: [], + output: 'generated.png', + stepsData: { + [getIntentStepName(['image', 'generate'])]: expect.objectContaining({ + robot: '/image/generate', + result: true, + prompt: 'A red bicycle in a studio', + model: 'flux-schnell', + aspect_ratio: '2:3', + }), + }, + }), + ) + }) + + it('maps preview generate flags to /file/preview step 
parameters', async () => { + const { createSpy } = await runIntentCommand([ + 'preview', + 'generate', + '--input', + 'document.pdf', + '--width', + '320', + '--height', + '200', + '--format', + 'jpg', + '--out', + 'preview.jpg', + ]) + + expect(process.exitCode).toBeUndefined() + expect(createSpy).toHaveBeenCalledWith( + expect.any(OutputCtl), + expect.anything(), + expect.objectContaining({ + inputs: ['document.pdf'], + output: 'preview.jpg', + stepsData: { + [getIntentStepName(['preview', 'generate'])]: expect.objectContaining({ + robot: '/file/preview', + result: true, + use: ':original', + width: 320, + height: 200, + format: 'jpg', + }), + }, + }), + ) + }) + + it('maps markdown pdf to /document/convert with backend Markdown rendering defaults', async () => { + const { createSpy } = await runIntentCommand([ + 'markdown', + 'pdf', + '--input', + 'README.md', + '--out', + 'README.pdf', + ]) + + expect(process.exitCode).toBeUndefined() + expect(createSpy).toHaveBeenCalledWith( + expect.any(OutputCtl), + expect.anything(), + expect.objectContaining({ + inputs: ['README.md'], + output: 'README.pdf', + stepsData: { + convert: expect.objectContaining({ + robot: '/document/convert', + use: ':original', + result: true, + format: 'pdf', + markdown_format: 'gfm', + markdown_theme: 'github', + }), + }, + }), + ) + }) + + it('passes through explicit markdown options for backend rendering', async () => { + const { createSpy } = await runIntentCommand([ + 'markdown', + 'pdf', + '--input', + 'README.md', + '--markdown-format', + 'commonmark', + '--markdown-theme', + 'bare', + '--out', + 'README.pdf', + ]) + + expect(process.exitCode).toBeUndefined() + expect(createSpy).toHaveBeenCalledWith( + expect.any(OutputCtl), + expect.anything(), + expect.objectContaining({ + inputs: ['README.md'], + output: 'README.pdf', + stepsData: { + convert: expect.objectContaining({ + robot: '/document/convert', + format: 'pdf', + markdown_format: 'commonmark', + markdown_theme: 'bare', + }), + 
}, + }), + ) + }) + + it('maps markdown docx to /document/convert with backend Markdown rendering defaults', async () => { + const { createSpy } = await runIntentCommand([ + 'markdown', + 'docx', + '--input', + 'README.md', + '--out', + 'README.docx', + ]) + + expect(process.exitCode).toBeUndefined() + expect(createSpy).toHaveBeenCalledWith( + expect.any(OutputCtl), + expect.anything(), + expect.objectContaining({ + inputs: ['README.md'], + output: 'README.docx', + stepsData: { + convert: expect.objectContaining({ + robot: '/document/convert', + use: ':original', + result: true, + format: 'docx', + markdown_format: 'gfm', + markdown_theme: 'github', + }), + }, + }), + ) + }) + + it('downloads URL inputs for preview generate before calling assemblies create', async () => { + nock('https://example.com').get('/file.pdf').reply(200, 'pdf-data') + const { createSpy } = await runIntentCommand([ + 'preview', + 'generate', + '--input', + 'https://example.com/file.pdf', + '--out', + 'preview.png', + ]) + + expect(process.exitCode).toBeUndefined() + expect(createSpy).toHaveBeenCalledWith( + expect.any(OutputCtl), + expect.anything(), + expect.objectContaining({ + inputs: [expect.stringContaining('transloadit-input-')], + stepsData: { + [getIntentStepName(['preview', 'generate'])]: expect.objectContaining({ + robot: '/file/preview', + use: ':original', + }), + }, + }), + ) + }) + + it('rejects private-host URL inputs for intent commands', async () => { + await expect( + prepareIntentInputs({ + inputValues: ['http://127.0.0.1/secret'], + inputBase64Values: [], + }), + ).rejects.toThrow('URL downloads are limited to public hosts') + }) + + it('keeps duplicate remote basenames as distinct temp inputs', async () => { + nock('http://198.51.100.10').get('/nested/file.pdf').reply(200, 'first-file') + nock('http://198.51.100.11').get('/other/file.pdf').reply(200, 'second-file') + + const prepared = await prepareIntentInputs({ + inputValues: ['http://198.51.100.10/nested/file.pdf', 
'http://198.51.100.11/other/file.pdf'], + inputBase64Values: [], + }) + + try { + expect(prepared.inputs).toHaveLength(2) + const firstPath = prepared.inputs[0] + const secondPath = prepared.inputs[1] + expect(firstPath).toBeDefined() + expect(secondPath).toBeDefined() + expect(firstPath).not.toBe(secondPath) + if (firstPath == null || secondPath == null) { + throw new Error('Expected prepared input paths') + } + + expect(await readFile(firstPath, 'utf8')).toBe('first-file') + expect(await readFile(secondPath, 'utf8')).toBe('second-file') + } finally { + await Promise.all(prepared.cleanup.map((cleanup) => cleanup())) + } + }) + + it('supports base64 inputs for intent commands', async () => { + const { createSpy } = await runIntentCommand([ + 'document', + 'convert', + '--input-base64', + Buffer.from('hello world').toString('base64'), + '--format', + 'pdf', + '--out', + 'output.pdf', + ]) + + expect(process.exitCode).toBeUndefined() + expect(createSpy).toHaveBeenCalledWith( + expect.any(OutputCtl), + expect.anything(), + expect.objectContaining({ + inputs: [expect.stringContaining('transloadit-input-')], + stepsData: { + [getIntentStepName(['document', 'convert'])]: expect.objectContaining({ + robot: '/document/convert', + use: ':original', + format: 'pdf', + }), + }, + }), + ) + }) + + it('preserves data URL media-type filenames for base64 intent inputs', async () => { + const base64Value = `data:text/plain;base64,${Buffer.from('hello').toString('base64')}` + + const { createSpy } = await runIntentCommand([ + 'document', + 'convert', + '--input-base64', + base64Value, + '--format', + 'pdf', + '--print-urls', + ]) + + expect(process.exitCode).toBeUndefined() + expect(createSpy).toHaveBeenCalledWith( + expect.any(OutputCtl), + expect.anything(), + expect.objectContaining({ + inputs: [expect.stringMatching(/input-base64-1\.(txt|text)$/)], + }), + ) + }) + + it('rejects --watch URL inputs before downloading them', async () => { + vi.stubEnv('TRANSLOADIT_KEY', 'key') + 
vi.stubEnv('TRANSLOADIT_SECRET', 'secret') + + const createSpy = vi.spyOn(assembliesCommands, 'create').mockResolvedValue({ + results: [], + hasFailures: false, + }) + const downloadScope = nock('https://example.test').get('/file.pdf').reply(200, 'pdf') + + vi.spyOn(process.stdout, 'write').mockImplementation(noopWrite) + + await main([ + 'preview', + 'generate', + '--watch', + '--input', + 'https://example.test/file.pdf', + '--out', + 'preview.png', + ]) + + expect(process.exitCode).toBe(1) + expect(createSpy).not.toHaveBeenCalled() + expect(downloadScope.isDone()).toBe(false) + }) + + it('accepts native boolean flags for generated intent options', async () => { + const { createSpy } = await runIntentCommand([ + 'image', + 'optimize', + '--input', + 'input.jpg', + '--progressive', + '--out', + 'optimized.jpg', + ]) + + expect(process.exitCode).toBeUndefined() + expect(createSpy).toHaveBeenCalledWith( + expect.any(OutputCtl), + expect.anything(), + expect.objectContaining({ + inputs: ['input.jpg'], + stepsData: { + [getIntentStepName(['image', 'optimize'])]: expect.objectContaining({ + robot: '/image/optimize', + use: ':original', + progressive: true, + }), + }, + }), + ) + }) + + it('rejects multi-input standard single-assembly runs with a file output before processing', async () => { + vi.stubEnv('TRANSLOADIT_KEY', 'key') + vi.stubEnv('TRANSLOADIT_SECRET', 'secret') + + const tempDir = await createTempDir('transloadit-intent-single-assembly-') + const inputA = path.join(tempDir, 'a.jpg') + const inputB = path.join(tempDir, 'b.jpg') + await writeFile(inputA, 'a') + await writeFile(inputB, 'b') + + const createSpy = vi.spyOn(assembliesCommands, 'create').mockResolvedValue({ + results: [], + hasFailures: false, + }) + const errorSpy = vi.spyOn(console, 'error').mockImplementation(() => {}) + + vi.spyOn(process.stdout, 'write').mockImplementation(noopWrite) + + await main([ + 'image', + 'optimize', + '--single-assembly', + '--input', + inputA, + '--input', + inputB, 
+ '--out', + path.join(tempDir, 'optimized.jpg'), + ]) + + expect(process.exitCode).toBe(1) + expect(createSpy).not.toHaveBeenCalled() + const loggedError = errorSpy.mock.calls.flatMap((call) => call.map(String)).join(' ') + expect(loggedError).toContain( + 'Output must be a directory when using --single-assembly with multiple inputs', + ) + }) + + it('allows multi-input standard single-assembly runs with --print-urls and no --out', async () => { + vi.stubEnv('TRANSLOADIT_KEY', 'key') + vi.stubEnv('TRANSLOADIT_SECRET', 'secret') + + const tempDir = await createTempDir('transloadit-intent-single-assembly-urls-') + const inputA = path.join(tempDir, 'a.jpg') + const inputB = path.join(tempDir, 'b.jpg') + await writeFile(inputA, 'a') + await writeFile(inputB, 'b') + + const createSpy = vi.spyOn(assembliesCommands, 'create').mockResolvedValue({ + results: [], + hasFailures: false, + resultUrls: [], + }) + vi.spyOn(process.stdout, 'write').mockImplementation(noopWrite) + + await main([ + 'image', + 'optimize', + '--single-assembly', + '--input', + inputA, + '--input', + inputB, + '--print-urls', + ]) + + expect(process.exitCode).toBeUndefined() + expect(createSpy).toHaveBeenCalledWith( + expect.any(OutputCtl), + expect.anything(), + expect.objectContaining({ + inputs: [inputA, inputB], + output: null, + singleAssembly: true, + }), + ) + }) + + it('rejects combining --watch with --single-assembly before processing', async () => { + vi.stubEnv('TRANSLOADIT_KEY', 'key') + vi.stubEnv('TRANSLOADIT_SECRET', 'secret') + + const tempDir = await createTempDir('transloadit-intent-watch-single-assembly-') + const inputPath = path.join(tempDir, 'input.jpg') + await writeFile(inputPath, 'a') + + const createSpy = vi.spyOn(assembliesCommands, 'create').mockResolvedValue({ + results: [], + hasFailures: false, + resultUrls: [], + }) + const errorSpy = vi.spyOn(console, 'error').mockImplementation(() => {}) + vi.spyOn(process.stdout, 'write').mockImplementation(noopWrite) + + await 
main([ + 'image', + 'optimize', + '--input', + inputPath, + '--out', + path.join(tempDir, 'optimized.jpg'), + '--watch', + '--single-assembly', + ]) + + expect(process.exitCode).toBe(1) + expect(createSpy).not.toHaveBeenCalled() + const loggedError = errorSpy.mock.calls.flatMap((call) => call.map(String)).join(' ') + expect(loggedError).toContain('--single-assembly cannot be used with --watch') + }) + + it('rejects single-directory standard single-assembly runs with a file output before processing', async () => { + vi.stubEnv('TRANSLOADIT_KEY', 'key') + vi.stubEnv('TRANSLOADIT_SECRET', 'secret') + + const tempDir = await createTempDir('transloadit-intent-single-assembly-dir-') + const inputDir = path.join(tempDir, 'inputs') + await mkdir(inputDir, { recursive: true }) + await writeFile(path.join(inputDir, 'a.jpg'), 'a') + await writeFile(path.join(inputDir, 'b.jpg'), 'b') + + const createSpy = vi.spyOn(assembliesCommands, 'create').mockResolvedValue({ + results: [], + hasFailures: false, + resultUrls: [], + }) + const errorSpy = vi.spyOn(console, 'error').mockImplementation(() => {}) + vi.spyOn(process.stdout, 'write').mockImplementation(noopWrite) + + await main([ + 'image', + 'optimize', + '--single-assembly', + '--input', + inputDir, + '--out', + path.join(tempDir, 'optimized.jpg'), + ]) + + expect(process.exitCode).toBe(1) + expect(createSpy).not.toHaveBeenCalled() + const loggedError = errorSpy.mock.calls.flatMap((call) => call.map(String)).join(' ') + expect(loggedError).toContain( + 'Output must be a directory when using --single-assembly with multiple inputs', + ) + }) + + it('maps video encode-hls to the builtin template', async () => { + const { createSpy } = await runIntentCommand([ + 'video', + 'encode-hls', + '--input', + 'input.mp4', + '--out', + 'dist/hls', + '--recursive', + ]) + + expect(process.exitCode).toBeUndefined() + expect(createSpy).toHaveBeenCalledWith( + expect.any(OutputCtl), + expect.anything(), + expect.objectContaining({ + template: 
'builtin/encode-hls-video@latest', + inputs: ['input.mp4'], + output: 'dist/hls', + recursive: true, + }), + ) + }) + + it('maps text speak flags to /text/speak step parameters', async () => { + const { createSpy } = await runIntentCommand([ + 'text', + 'speak', + '--prompt', + 'Hello world', + '--provider', + 'aws', + '--target-language', + 'en-US', + '--voice', + 'female-1', + '--out', + 'hello.mp3', + ]) + + expect(process.exitCode).toBeUndefined() + expect(createSpy).toHaveBeenCalledWith( + expect.any(OutputCtl), + expect.anything(), + expect.objectContaining({ + inputs: [], + output: 'hello.mp3', + stepsData: { + [getIntentStepName(['text', 'speak'])]: expect.objectContaining({ + robot: '/text/speak', + result: true, + prompt: 'Hello world', + provider: 'aws', + target_language: 'en-US', + voice: 'female-1', + }), + }, + }), + ) + }) + + it('supports prompt-only text speak runs without an input file', async () => { + const { createSpy } = await runIntentCommand([ + 'text', + 'speak', + '--prompt', + 'Hello from a prompt', + '--provider', + 'aws', + '--out', + 'hello.mp3', + ]) + + expect(process.exitCode).toBeUndefined() + expect(createSpy).toHaveBeenCalledWith( + expect.any(OutputCtl), + expect.anything(), + expect.objectContaining({ + inputs: [], + output: 'hello.mp3', + stepsData: { + [getIntentStepName(['text', 'speak'])]: { + robot: '/text/speak', + result: true, + prompt: 'Hello from a prompt', + provider: 'aws', + }, + }, + }), + ) + }) + + it('supports file-backed text speak runs without a prompt', async () => { + const { createSpy } = await runIntentCommand([ + 'text', + 'speak', + '--input', + 'article.txt', + '--provider', + 'aws', + '--out', + 'hello.mp3', + ]) + + expect(process.exitCode).toBeUndefined() + expect(createSpy).toHaveBeenCalledWith( + expect.any(OutputCtl), + expect.anything(), + expect.objectContaining({ + inputs: ['article.txt'], + output: 'hello.mp3', + stepsData: { + [getIntentStepName(['text', 'speak'])]: { + robot: 
'/text/speak', + result: true, + use: ':original', + provider: 'aws', + }, + }, + }), + ) + }) + + it('omits schema defaults from generated intent steps', async () => { + const { createSpy } = await runIntentCommand([ + 'audio', + 'waveform', + '--input', + 'podcast.mp3', + '--out', + 'waveform.png', + ]) + + expect(process.exitCode).toBeUndefined() + expect(createSpy).toHaveBeenCalledWith( + expect.any(OutputCtl), + expect.anything(), + expect.objectContaining({ + inputs: ['podcast.mp3'], + output: 'waveform.png', + stepsData: { + [getIntentStepName(['audio', 'waveform'])]: { + robot: '/audio/waveform', + result: true, + use: ':original', + }, + }, + }), + ) + }) + + it('applies schema normalization before submitting generated steps', async () => { + const { createSpy } = await runIntentCommand([ + 'audio', + 'waveform', + '--input', + 'song.mp3', + '--style', + '1', + '--out', + 'waveform.png', + ]) + + expect(process.exitCode).toBeUndefined() + expect(createSpy).toHaveBeenCalledWith( + expect.any(OutputCtl), + expect.anything(), + expect.objectContaining({ + inputs: ['song.mp3'], + output: 'waveform.png', + stepsData: { + [getIntentStepName(['audio', 'waveform'])]: expect.objectContaining({ + robot: '/audio/waveform', + result: true, + use: ':original', + style: 'v1', + }), + }, + }), + ) + }) + + it('passes directory output intent for multi-file commands', async () => { + const { createSpy } = await runIntentCommand([ + 'video', + 'thumbs', + '--input', + 'demo.mp4', + '--out', + 'thumbs', + ]) + + expect(process.exitCode).toBeUndefined() + expect(createSpy).toHaveBeenCalledWith( + expect.any(OutputCtl), + expect.anything(), + expect.objectContaining({ + inputs: ['demo.mp4'], + output: 'thumbs', + outputMode: 'directory', + }), + ) + }) + + it('coerces numeric literal union options like video thumbs --rotate', async () => { + const { createSpy } = await runIntentCommand([ + 'video', + 'thumbs', + '--input', + 'demo.mp4', + '--rotate', + '90', + '--out', + 
'thumbs', + ]) + + expect(process.exitCode).toBeUndefined() + expect(createSpy).toHaveBeenCalledWith( + expect.any(OutputCtl), + expect.anything(), + expect.objectContaining({ + stepsData: { + [getIntentStepName(['video', 'thumbs'])]: expect.objectContaining({ + robot: '/video/thumbs', + rotate: 90, + }), + }, + }), + ) + }) + + it('maps array-valued robot parameters from JSON flags', async () => { + const { createSpy } = await runIntentCommand([ + 'video', + 'thumbs', + '--input', + 'demo.mp4', + '--offsets', + '[1,2,3]', + '--out', + 'thumbs', + ]) + + expect(process.exitCode).toBeUndefined() + expect(createSpy).toHaveBeenCalledWith( + expect.any(OutputCtl), + expect.anything(), + expect.objectContaining({ + stepsData: { + [getIntentStepName(['video', 'thumbs'])]: expect.objectContaining({ + robot: '/video/thumbs', + offsets: [1, 2, 3], + }), + }, + }), + ) + }) + + it('maps object-valued robot parameters from JSON flags', async () => { + const { createSpy } = await runIntentCommand([ + 'preview', + 'generate', + '--input', + 'document.pdf', + '--strategy', + '{"document":["page","icon"],"unknown":["icon"]}', + '--out', + 'preview.png', + ]) + + expect(process.exitCode).toBeUndefined() + expect(createSpy).toHaveBeenCalledWith( + expect.any(OutputCtl), + expect.anything(), + expect.objectContaining({ + stepsData: { + [getIntentStepName(['preview', 'generate'])]: expect.objectContaining({ + robot: '/file/preview', + strategy: expect.objectContaining({ + document: ['page', 'icon'], + unknown: ['icon'], + }), + }), + }, + }), + ) + }) + + it('rejects blank numeric values instead of coercing them to zero', () => { + expect(() => coerceIntentFieldValue('number', ' ')).toThrow('Expected a number') + }) + + it('classifies string array schemas as string-array intent fields', () => { + expect(inferIntentFieldKind(z.array(z.string()))).toBe('string-array') + expect(inferIntentFieldKind(z.union([z.string(), z.array(z.string())]))).toBe('string-array') + }) + + it('parses 
shared string-array values from csv, repeated flags, and JSON arrays', () => { + expect(parseStringArrayValue('altText,title')).toEqual(['altText', 'title']) + expect(parseStringArrayValue(['altText,title', 'caption'])).toEqual([ + 'altText', + 'title', + 'caption', + ]) + expect(parseStringArrayValue(['["altText","title"]'])).toEqual(['altText', 'title']) + }) + + it('parses JSON objects for auto-typed flags like image resize --crop', async () => { + const { createSpy } = await runIntentCommand([ + 'image', + 'resize', + '--input', + 'demo.jpg', + '--crop', + '{"x1":80,"y1":100,"x2":"60%","y2":"80%"}', + '--out', + 'resized.jpg', + ]) + + expect(process.exitCode).toBeUndefined() + expect(createSpy).toHaveBeenCalledWith( + expect.any(OutputCtl), + expect.anything(), + expect.objectContaining({ + stepsData: { + [getIntentStepName(['image', 'resize'])]: expect.objectContaining({ + crop: { + x1: 80, + y1: 100, + x2: '60%', + y2: '80%', + }, + }), + }, + }), + ) + }) + + it('parses JSON arrays for auto-typed flags like image resize --watermark-position', async () => { + const { createSpy } = await runIntentCommand([ + 'image', + 'resize', + '--input', + 'demo.jpg', + '--watermark-position', + '["center","left"]', + '--out', + 'resized.jpg', + ]) + + expect(process.exitCode).toBeUndefined() + expect(createSpy).toHaveBeenCalledWith( + expect.any(OutputCtl), + expect.anything(), + expect.objectContaining({ + stepsData: { + [getIntentStepName(['image', 'resize'])]: expect.objectContaining({ + watermark_position: ['center', 'left'], + }), + }, + }), + ) + }) + + it('coerces mixed rotation flags like image resize --rotation 90', async () => { + const { createSpy } = await runIntentCommand([ + 'image', + 'resize', + '--input', + 'demo.jpg', + '--rotation', + '90', + '--out', + 'resized.jpg', + ]) + + expect(process.exitCode).toBeUndefined() + expect(createSpy).toHaveBeenCalledWith( + expect.any(OutputCtl), + expect.anything(), + expect.objectContaining({ + stepsData: { + 
[getIntentStepName(['image', 'resize'])]: expect.objectContaining({ + robot: '/image/resize', + rotation: 90, + }), + }, + }), + ) + }) + + it('coerces mixed boolean-or-number flags like audio waveform --antialiasing 1', async () => { + const { createSpy } = await runIntentCommand([ + 'audio', + 'waveform', + '--input', + 'song.mp3', + '--antialiasing', + '1', + '--out', + 'waveform.png', + ]) + + expect(process.exitCode).toBeUndefined() + expect(createSpy).toHaveBeenCalledWith( + expect.any(OutputCtl), + expect.anything(), + expect.objectContaining({ + stepsData: { + [getIntentStepName(['audio', 'waveform'])]: expect.objectContaining({ + robot: '/audio/waveform', + antialiasing: 1, + }), + }, + }), + ) + }) + + it('maps file compress to a bundled single assembly by default', async () => { + const { createSpy } = await runIntentCommand([ + 'file', + 'compress', + '--input', + 'assets', + '--format', + 'zip', + '--gzip', + '--out', + 'assets.zip', + ]) + + expect(process.exitCode).toBeUndefined() + expect(createSpy).toHaveBeenCalledWith( + expect.any(OutputCtl), + expect.anything(), + expect.objectContaining({ + inputs: ['assets'], + output: 'assets.zip', + singleAssembly: true, + stepsData: { + [getIntentStepName(['file', 'compress'])]: expect.objectContaining({ + robot: '/file/compress', + result: true, + format: 'zip', + gzip: true, + use: { + steps: [':original'], + bundle_steps: true, + }, + }), + }, + }), + ) + }) + + it('omits nullable defaults like file compress password when not provided', async () => { + const { createSpy } = await runIntentCommand([ + 'file', + 'compress', + '--input', + 'assets', + '--format', + 'zip', + '--out', + 'assets.zip', + ]) + + expect(process.exitCode).toBeUndefined() + expect(createSpy).toHaveBeenCalledWith( + expect.any(OutputCtl), + expect.anything(), + expect.objectContaining({ + stepsData: { + [getIntentStepName(['file', 'compress'])]: { + robot: '/file/compress', + result: true, + format: 'zip', + use: { + steps: 
[':original'], + bundle_steps: true, + }, + }, + }, + }), + ) + }) + + it('omits numeric defaults like video thumbs rotate when not provided', async () => { + const { createSpy } = await runIntentCommand([ + 'video', + 'thumbs', + '--input', + 'demo.mp4', + '--out', + 'thumbs', + ]) + + expect(process.exitCode).toBeUndefined() + expect(createSpy).toHaveBeenCalledWith( + expect.any(OutputCtl), + expect.anything(), + expect.objectContaining({ + stepsData: { + [getIntentStepName(['video', 'thumbs'])]: { + robot: '/video/thumbs', + result: true, + use: ':original', + }, + }, + }), + ) + }) + + it('includes required schema flags in generated usage examples', () => { + expect(getIntentCommand(['document', 'convert']).usage.examples).toEqual([ + ['Run the command', expect.stringContaining('--format')], + ]) + expect(getIntentCommand(['text', 'speak']).usage.examples).toEqual([ + ['Run the command', expect.stringContaining('--provider')], + ]) + expect(getIntentCommand(['document', 'convert']).usage.examples).toEqual([ + ['Run the command', expect.stringContaining('output.pdf')], + ]) + }) + + it('keeps the catalog, generated commands, and smoke cases in sync', () => { + const catalogPaths = intentCatalog.map((definition) => getIntentPaths(definition).join(' ')) + const generatedPaths = intentCommands.map((command) => command.paths[0]?.join(' ')) + const smokePaths = intentSmokeCases.map((smokeCase) => smokeCase.paths.join(' ')) + + expect([...catalogPaths].sort()).toEqual([...generatedPaths].sort()) + expect([...catalogPaths].sort()).toEqual([...smokePaths].sort()) + }) +}) diff --git a/packages/node/test/unit/cli/result-urls.test.ts b/packages/node/test/unit/cli/result-urls.test.ts new file mode 100644 index 00000000..ed25432a --- /dev/null +++ b/packages/node/test/unit/cli/result-urls.test.ts @@ -0,0 +1,47 @@ +import { describe, expect, it } from 'vitest' + +import { collectResultUrlRows, formatResultUrlRows } from '../../../src/cli/resultUrls.ts' + +describe('result 
url helpers', () => { + it('prefers ssl_url and falls back to basename/name fields', () => { + const rows = collectResultUrlRows({ + assemblyId: 'assembly-1', + results: { + generated: [ + { + basename: 'fallback-name.png', + name: null, + ssl_url: 'https://secure.example.com/file.png', + url: 'http://insecure.example.com/file.png', + }, + ], + }, + }) + + expect(rows).toEqual([ + { + assemblyId: 'assembly-1', + step: 'generated', + name: 'fallback-name.png', + url: 'https://secure.example.com/file.png', + }, + ]) + }) + + it('formats aligned human-readable tables', () => { + const table = formatResultUrlRows([ + { + assemblyId: 'assembly-1', + step: 'describe', + name: 'hero.json', + url: 'https://example.com/hero.json', + }, + ]) + + expect(table).toContain('STEP') + expect(table).toContain('NAME') + expect(table).toContain('URL') + expect(table).toContain('describe') + expect(table).toContain('hero.json') + }) +}) diff --git a/packages/node/test/unit/ensure-unique-counter.test.ts b/packages/node/test/unit/ensure-unique-counter.test.ts new file mode 100644 index 00000000..28c4ff31 --- /dev/null +++ b/packages/node/test/unit/ensure-unique-counter.test.ts @@ -0,0 +1,32 @@ +import { describe, expect, it } from 'vitest' +import { ensureUniqueCounterValue } from '../../src/ensureUniqueCounter.ts' + +describe('ensureUniqueCounterValue', () => { + it('does not hand out the same candidate to concurrent callers in the same scope', async () => { + const reserved = new Set<string>() + const seenCandidates: string[] = [] + + const allocate = async (): Promise<string> => + await ensureUniqueCounterValue({ + initialValue: 'result.txt', + isTaken: async (candidate) => { + seenCandidates.push(candidate) + await Promise.resolve() + return reserved.has(candidate) + }, + reserve: (candidate) => { + reserved.add(candidate) + }, + nextValue: (counter) => `result__${counter}.txt`, + scope: reserved, + }) + + const [first, second] = await Promise.all([allocate(), allocate()]) + + expect(new
Set([first, second]).size).toBe(2) + expect(reserved).toEqual(new Set([first, second])) + expect(seenCandidates.filter((candidate) => candidate === 'result.txt').length).toBeGreaterThan( + 0, + ) + }) +}) diff --git a/packages/node/test/unit/input-files.test.ts b/packages/node/test/unit/input-files.test.ts index 01179a54..afacfca6 100644 --- a/packages/node/test/unit/input-files.test.ts +++ b/packages/node/test/unit/input-files.test.ts @@ -1,8 +1,27 @@ import { mkdtemp, rm } from 'node:fs/promises' import { tmpdir } from 'node:os' -import { join } from 'node:path' -import { describe, expect, it } from 'vitest' -import { prepareInputFiles } from '../../src/inputFiles.ts' +import { basename, join } from 'node:path' +import nock from 'nock' +import { afterEach, describe, expect, it, vi } from 'vitest' +import { + createPinnedDnsLookup, + prepareInputFiles, + resolvePublicDownloadAddresses, +} from '../../src/inputFiles.ts' + +const { lookupMock } = vi.hoisted(() => ({ + lookupMock: vi.fn(), +})) + +vi.mock('node:dns/promises', () => ({ + lookup: lookupMock, +})) + +afterEach(() => { + vi.restoreAllMocks() + lookupMock.mockReset() + nock.cleanAll() +}) describe('prepareInputFiles', () => { it('splits files, uploads, and url imports', async () => { @@ -60,6 +79,40 @@ describe('prepareInputFiles', () => { } }) + it('preserves leading-dot basenames when duplicate tempfiles collide', async () => { + const tempDir = await mkdtemp(join(tmpdir(), 'transloadit-test-')) + + try { + const base64 = Buffer.from('hello').toString('base64') + + const result = await prepareInputFiles({ + inputFiles: [ + { + kind: 'base64', + field: 'first', + base64, + filename: '.gitignore', + }, + { + kind: 'base64', + field: 'second', + base64, + filename: '.gitignore', + }, + ], + base64Strategy: 'tempfile', + tempDir, + }) + + expect(result.files.first.startsWith(tempDir)).toBe(true) + expect(result.files.second.startsWith(tempDir)).toBe(true) + 
expect(basename(result.files.first)).toBe('.gitignore') + expect(basename(result.files.second)).toBe('.gitignore-1') + } finally { + await rm(tempDir, { recursive: true, force: true }) + } + }) + it('rejects oversized base64 payloads before decoding', async () => { const oversized = '!'.repeat(128) @@ -93,4 +146,186 @@ describe('prepareInputFiles', () => { }), ).rejects.toThrow('URL downloads are limited') }) + + it('rejects non-canonical IPv6 loopback URL downloads', async () => { + await expect( + prepareInputFiles({ + inputFiles: [ + { + kind: 'url', + field: 'remote', + url: 'http://[0:0:0:0:0:0:0:1]/secret', + }, + ], + urlStrategy: 'download', + allowPrivateUrls: false, + }), + ).rejects.toThrow('URL downloads are limited') + }) + + it('rejects IPv4-mapped loopback URL downloads', async () => { + await expect( + prepareInputFiles({ + inputFiles: [ + { + kind: 'url', + field: 'remote', + url: 'http://[::ffff:127.0.0.1]/secret', + }, + ], + urlStrategy: 'download', + allowPrivateUrls: false, + }), + ).rejects.toThrow('URL downloads are limited') + }) + + it('rejects hostnames that resolve to private IPs', async () => { + lookupMock.mockResolvedValue([{ address: '127.0.0.1', family: 4 }]) + const downloadScope = nock('http://rebind.test').get('/secret').reply(200, 'secret') + + await expect( + prepareInputFiles({ + inputFiles: [ + { + kind: 'url', + field: 'remote', + url: 'http://rebind.test/secret', + }, + ], + urlStrategy: 'download', + allowPrivateUrls: false, + }), + ).rejects.toThrow('URL downloads are limited') + + expect(downloadScope.isDone()).toBe(false) + }) + + it('rejects hostnames that resolve to carrier-grade NAT ranges', async () => { + lookupMock.mockResolvedValue([{ address: '100.64.0.1', family: 4 }]) + + await expect( + prepareInputFiles({ + inputFiles: [ + { + kind: 'url', + field: 'remote', + url: 'http://cgnat.test/secret', + }, + ], + urlStrategy: 'download', + allowPrivateUrls: false, + }), + ).rejects.toThrow('URL downloads are 
limited') + }) + + it('rejects hostnames that resolve to benchmark-testing ranges', async () => { + lookupMock.mockResolvedValue([{ address: '198.18.0.1', family: 4 }]) + + await expect( + prepareInputFiles({ + inputFiles: [ + { + kind: 'url', + field: 'remote', + url: 'http://benchmark.test/secret', + }, + ], + urlStrategy: 'download', + allowPrivateUrls: false, + }), + ).rejects.toThrow('URL downloads are limited') + }) + + it('rejects redirects to private URL downloads', async () => { + lookupMock.mockResolvedValue([{ address: '198.51.100.10', family: 4 }]) + const publicScope = nock('http://198.51.100.10') + .get('/public') + .reply(302, undefined, { Location: 'http://127.0.0.1/secret' }) + const privateScope = nock('http://127.0.0.1').get('/secret').reply(200, 'secret') + + await expect( + prepareInputFiles({ + inputFiles: [ + { + kind: 'url', + field: 'remote', + url: 'http://198.51.100.10/public', + }, + ], + urlStrategy: 'download', + allowPrivateUrls: false, + }), + ).rejects.toThrow('URL downloads are limited') + + expect(publicScope.isDone()).toBe(true) + expect(privateScope.isDone()).toBe(false) + }) + + it('allows public IPv6 literal URL downloads under the private-host guard', async () => { + const resolved = await resolvePublicDownloadAddresses('http://[2001:db8::1]/public') + + expect(resolved).toEqual([{ address: '2001:db8::1', family: 6 }]) + expect(lookupMock).not.toHaveBeenCalled() + }) + + it('pins URL downloads to the validated DNS answer', async () => { + lookupMock.mockResolvedValue([{ address: '198.51.100.10', family: 4 }]) + const downloadScope = nock('http://rebind.test').get('/public').reply(200, 'public-data') + + const result = await prepareInputFiles({ + inputFiles: [ + { + kind: 'url', + field: 'remote', + url: 'http://rebind.test/public', + }, + ], + urlStrategy: 'download', + allowPrivateUrls: false, + }) + + try { + const downloadedPath = result.files.remote + expect(downloadedPath).toBeDefined() + 
expect(downloadScope.isDone()).toBe(true) + } finally { + await Promise.all(result.cleanup.map((cleanup) => cleanup())) + } + }) + + it('returns all validated public addresses from the pinned lookup and honors requested families', async () => { + const lookup = createPinnedDnsLookup([ + { address: '2001:db8::1', family: 6 }, + { address: '198.51.100.10', family: 4 }, + ]) + + const allAddresses = await new Promise<Array<{ address: string; family: number }>>( + (resolve, reject) => { + lookup('rebind.test', { all: true }, (error, result) => { + if (error != null) { + reject(error) + return + } + resolve(result) + }) + }, + ) + const ipv4Address = await new Promise<{ address: string; family: number }>( + (resolve, reject) => { + lookup('rebind.test', 4, (error, address, family) => { + if (error != null) { + reject(error) + return + } + resolve({ address, family }) + }) + }, + ) + + expect(allAddresses).toEqual([ + { address: '2001:db8::1', family: 6, expires: 0 }, + { address: '198.51.100.10', family: 4, expires: 0 }, + ]) + expect(ipv4Address).toEqual({ address: '198.51.100.10', family: 4 }) + }) +}) diff --git a/packages/transloadit/README.md b/packages/transloadit/README.md new file mode 100644 index 00000000..b48854c1 --- /dev/null +++ b/packages/transloadit/README.md @@ -0,0 +1,1737 @@ +[![Build Status](https://github.com/transloadit/node-sdk/actions/workflows/ci.yml/badge.svg)](https://github.com/transloadit/node-sdk/actions/workflows/ci.yml) +[![Coverage](https://codecov.io/gh/transloadit/node-sdk/branch/main/graph/badge.svg)](https://codecov.io/gh/transloadit/node-sdk) + + + + + + Transloadit Logo + + + +This is the official **Node.js** SDK for [Transloadit](https://transloadit.com)'s file uploading and encoding service. + +## Intro + +[Transloadit](https://transloadit.com) is a service that helps you handle file +uploads, resize, crop and watermark your images, make GIFs, transcode your +videos, extract thumbnails, generate audio waveforms, [and so much more](https://transloadit.com/demos/).
In +short, [Transloadit](https://transloadit.com) is the Swiss Army Knife for your +files. + +This is a **Node.js** SDK to make it easy to talk to the +[Transloadit](https://transloadit.com) REST API. + +## Requirements + +- [Node.js](https://nodejs.org/en/) version 20 or newer +- [A Transloadit account](https://transloadit.com/signup/) ([free signup](https://transloadit.com/pricing/)) +- [Your API credentials](https://transloadit.com/c/template-credentials) (`authKey`, `authSecret`) + +## Install + +Inside your project, type: + +```bash +yarn add @transloadit/node +``` + +or + +```bash +npm install --save @transloadit/node +``` + +The legacy npm package name `transloadit` is kept as an alias for backward compatibility, but +`@transloadit/node` is the canonical package name. + +## Command Line Interface (CLI) + +This package includes a full-featured CLI for interacting with Transloadit from your terminal. + +### Quick Start + +```bash +# Set your credentials +export TRANSLOADIT_KEY="YOUR_TRANSLOADIT_KEY" +export TRANSLOADIT_SECRET="YOUR_TRANSLOADIT_SECRET" + +# See all available commands +npx -y @transloadit/node --help +``` + +The CLI binary is still called `transloadit`, so command examples below may use +`npx transloadit ...`. + +### Minting Bearer Tokens (Hosted MCP) + +If you want to connect an agent to the Transloadit-hosted MCP endpoint, mint a short-lived bearer +token via `POST /token`: + +```bash +# Prints JSON to stdout (stderr may include npx/npm noise) +npx -y transloadit auth token --aud mcp +``` + +To reduce blast radius, you can request a narrower set of scopes. The server will only grant scopes +that your Auth Key already has (it applies an intersection), and will error if you request scopes +you are not allowed to use. 
+ +```bash +# Request a narrower token (comma-separated scopes) +npx -y transloadit auth token --aud mcp --scope assemblies:write,templates:read +``` + +### Processing Media + +For common one-off tasks, prefer the intent-first commands: + +The full generated intent reference also lives in [`docs/intent-commands.md`](./docs/intent-commands.md). + + + +#### At a glance + +Intent commands are the fastest path to common one-off tasks from the CLI. +Use `--print-urls` when you want temporary result URLs without downloading locally. +All intent commands also support the global CLI flags `--json`, `--log-level`, `--endpoint`, and `--help`. + +| Command | What it does | Input | Output | +| --- | --- | --- | --- | +| `image generate` | Generate images from text prompts | none | file | +| `preview generate` | Generate a preview thumbnail | file, dir, URL, base64 | file | +| `image remove-background` | Remove the background from images | file, dir, URL, base64 | file | +| `image optimize` | Optimize images without quality loss | file, dir, URL, base64 | file | +| `image resize` | Convert, resize, or watermark images | file, dir, URL, base64 | file | +| `document convert` | Convert documents into different formats | file, dir, URL, base64 | file | +| `document optimize` | Reduce PDF file size | file, dir, URL, base64 | file | +| `document auto-rotate` | Auto-rotate documents to the correct orientation | file, dir, URL, base64 | file | +| `document thumbs` | Extract thumbnail images from documents | file, dir, URL, base64 | directory | +| `audio waveform` | Generate waveform images from audio | file, dir, URL, base64 | file | +| `text speak` | Speak text | file, dir, URL, base64 | file | +| `video thumbs` | Extract thumbnails from videos | file, dir, URL, base64 | directory | +| `video encode-hls` | Run builtin/encode-hls-video@latest | file, dir, URL, base64 | directory | +| `image describe` | Describe images as labels or publishable text fields | file, dir, URL, base64 | file 
| +| `markdown pdf` | Render Markdown files as PDFs | file, dir, URL, base64 | file | +| `markdown docx` | Render Markdown files as DOCX documents | file, dir, URL, base64 | file | +| `file compress` | Compress files | file, dir, URL, base64 | file | +| `file decompress` | Decompress archives | file, dir, URL, base64 | directory | + +> At least one of `--out` or `--print-urls` is required on every intent command. + +#### Shared flags + +These flags are available across many intent commands, so the per-command sections below focus on differences. + +**Shared file input & output flags** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--input, -i` | `path \| dir \| url \| -` | varies | `input.file` | Provide an input path, directory, URL, or - for stdin | +| `--input-base64` | `base64 \| data URL` | no | `data:text/plain;base64,SGVsbG8=` | Provide base64-encoded input content directly | +| `--out, -o` | `path` | yes* | `output.file` | Write the result to this path or directory | +| `--print-urls` | `boolean` | no | `false` | Print temporary result URLs after completion | + +**Shared no-input output flags** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--out, -o` | `path` | yes* | `output.file` | Write the result to this path | +| `--print-urls` | `boolean` | no | `false` | Print temporary result URLs after completion | + +**Shared processing flags** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--recursive, -r` | `boolean` | no | `false` | Enumerate input directories recursively | +| `--delete-after-processing, -d` | `boolean` | no | `false` | Delete input files after they are processed | +| `--reprocess-stale` | `boolean` | no | `false` | Process inputs even if output is newer | + +**Shared watch flags** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--watch, -w` | `boolean` | no | `false` | 
Watch inputs for changes | +| `--concurrency, -c` | `number` | no | `5` | Maximum number of concurrent assemblies (default: 5) | + +**Shared bundling flags** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--single-assembly` | `boolean` | no | `false` | Pass all input files to a single assembly instead of one assembly per file | + +#### `image generate` + +Generate images from text prompts + +Runs `/image/generate` and writes the result to `--out`. + +**Usage** + +```bash +npx transloadit image generate [options] +``` + +**Quick facts** + +- Input: none +- Output: file +- Execution: no input +- Backend: `/image/generate` + +**Shared flags** + +- Uses the shared output flags listed above. + +**Command options** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--model` | `string` | no | `value` | The AI model to use for image generation. Defaults to google/nano-banana. | +| `--prompt` | `string` | yes | `"A red bicycle in a studio"` | The prompt describing the desired image content. | +| `--format` | `string` | no | `jpg` | Format of the generated image. | +| `--seed` | `number` | no | `1` | Seed for the random number generator. | +| `--aspect-ratio` | `string` | no | `value` | Aspect ratio of the generated image. | +| `--height` | `number` | no | `1` | Height of the generated image. | +| `--width` | `number` | no | `1` | Width of the generated image. | +| `--style` | `string` | no | `value` | Style of the generated image. | +| `--num-outputs` | `number` | no | `1` | Number of image variants to generate. | + +**Examples** + +```bash +transloadit image generate --prompt "A red bicycle in a studio" --out output.png +``` + +#### `preview generate` + +Generate a preview thumbnail + +Runs `/file/preview` on each input file and writes the result to `--out`. 
+ +**Usage** + +```bash +npx transloadit preview generate --input [options] +``` + +**Quick facts** + +- Input: file, dir, URL, base64 +- Output: file +- Execution: per-file; supports `--single-assembly` and `--watch` +- Backend: `/file/preview` + +**Shared flags** + +- Uses the shared file input and output flags listed above. +- Also supports the shared base processing flags, watch flags, bundling flags listed above. + +**Command options** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--format` | `string` | no | `jpg` | The output format for the generated thumbnail image. If a short video clip is generated using the clip strategy, its format is defined by clip_format. | +| `--width` | `number` | no | `1` | Width of the thumbnail, in pixels. | +| `--height` | `number` | no | `1` | Height of the thumbnail, in pixels. | +| `--resize-strategy` | `string` | no | `crop` | To achieve the desired dimensions of the preview thumbnail, the Robot might have to resize the generated image. | +| `--background` | `string` | no | `value` | The hexadecimal code of the color used to fill the background (only used for the pad resize strategy). | +| `--strategy` | `json` | no | `value` | Definition of the thumbnail generation process per file category. | +| `--artwork-outer-color` | `string` | no | `value` | The color used in the outer parts of the artwork's gradient. | +| `--artwork-center-color` | `string` | no | `value` | The color used in the center of the artwork's gradient. | +| `--waveform-center-color` | `string` | no | `value` | The color used in the center of the waveform's gradient. The format is #rrggbb[aa] (red, green, blue, alpha). Only used if the waveform strategy for audio files is applied. | +| `--waveform-outer-color` | `string` | no | `value` | The color used in the outer parts of the waveform's gradient. The format is #rrggbb[aa] (red, green, blue, alpha). Only used if the waveform strategy for audio files is applied. 
| +| `--waveform-height` | `number` | no | `1` | Height of the waveform, in pixels. Only used if the waveform strategy for audio files is applied. It can be utilized to ensure that the waveform only takes up a section of the… | +| `--waveform-width` | `number` | no | `1` | Width of the waveform, in pixels. Only used if the waveform strategy for audio files is applied. It can be utilized to ensure that the waveform only takes up a section of the… | +| `--icon-style` | `string` | no | `square` | The style of the icon generated if the icon strategy is applied. | +| `--icon-text-color` | `string` | no | `value` | The color of the text used in the icon. The format is #rrggbb[aa]. Only used if the icon strategy is applied. | +| `--icon-text-font` | `string` | no | `value` | The font family of the text used in the icon. Only used if the icon strategy is applied. Here is a list of all supported fonts. | +| `--icon-text-content` | `string` | no | `extension` | The content of the text box in generated icons. Only used if the icon_style parameter is set to with-text. The default value, extension, adds the file extension (e.g. MP4, JPEG)… | +| `--optimize` | `boolean` | no | `true` | Specifies whether the generated preview image should be optimized to reduce the image's file size while keeping its quality. | +| `--optimize-priority` | `string` | no | `compression-ratio` | Specifies whether conversion speed or compression ratio is prioritized when optimizing images. | +| `--optimize-progressive` | `boolean` | no | `true` | Specifies whether images should be interlaced, which makes the result image load progressively in browsers. | +| `--clip-format` | `string` | no | `apng` | The animated image format for the generated video clip. Only used if the clip strategy for video files is applied. Please consult the MDN Web Docs for detailed information about… | +| `--clip-offset` | `number` | no | `1` | The start position in seconds of where the clip is cut.
Only used if the clip strategy for video files is applied. Be aware that for larger video only the first few MBs of the… | +| `--clip-duration` | `number` | no | `1` | The duration in seconds of the generated video clip. Only used if the clip strategy for video files is applied. Be aware that a longer clip duration also results in a larger file… | +| `--clip-framerate` | `number` | no | `1` | The framerate of the generated video clip. Only used if the clip strategy for video files is applied. Be aware that a higher framerate appears smoother but also results in a… | +| `--clip-loop` | `boolean` | no | `true` | Specifies whether the generated animated image should loop forever (true) or stop after playing the animation once (false). | + +**Examples** + +```bash +transloadit preview generate --input input.file --out output.file +``` + +#### `image remove-background` + +Remove the background from images + +Runs `/image/bgremove` on each input file and writes the result to `--out`. + +**Usage** + +```bash +npx transloadit image remove-background --input [options] +``` + +**Quick facts** + +- Input: file, dir, URL, base64 +- Output: file +- Execution: per-file; supports `--single-assembly` and `--watch` +- Backend: `/image/bgremove` + +**Shared flags** + +- Uses the shared file input and output flags listed above. +- Also supports the shared base processing flags, watch flags, bundling flags listed above. + +**Command options** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--select` | `string` | no | `foreground` | Region to select and keep in the image. The other region is removed. | +| `--format` | `string` | no | `png` | Format of the generated image. | +| `--provider` | `string` | no | `aws` | Provider to use for removing the background. | +| `--model` | `string` | no | `value` | Provider-specific model to use for removing the background. Mostly intended for testing and evaluation. 
| + +**Examples** + +```bash +transloadit image remove-background --input input.png --out output.png +``` + +#### `image optimize` + +Optimize images without quality loss + +Runs `/image/optimize` on each input file and writes the result to `--out`. + +**Usage** + +```bash +npx transloadit image optimize --input [options] +``` + +**Quick facts** + +- Input: file, dir, URL, base64 +- Output: file +- Execution: per-file; supports `--single-assembly` and `--watch` +- Backend: `/image/optimize` + +**Shared flags** + +- Uses the shared file input and output flags listed above. +- Also supports the shared base processing flags, watch flags, bundling flags listed above. + +**Command options** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--priority` | `string` | no | `compression-ratio` | Provides different algorithms for better or worse compression for your images, but that run slower or faster. | +| `--progressive` | `boolean` | no | `true` | Interlaces the image if set to true, which makes the result image load progressively in browsers. | +| `--preserve-meta-data` | `boolean` | no | `true` | Specifies if the image's metadata should be preserved during the optimization, or not. | +| `--fix-breaking-images` | `boolean` | no | `true` | If set to true this parameter tries to fix images that would otherwise make the underlying tool error out and thereby break your Assemblies . | + +**Examples** + +```bash +transloadit image optimize --input input.png --out output.png +``` + +#### `image resize` + +Convert, resize, or watermark images + +Runs `/image/resize` on each input file and writes the result to `--out`. + +**Usage** + +```bash +npx transloadit image resize --input [options] +``` + +**Quick facts** + +- Input: file, dir, URL, base64 +- Output: file +- Execution: per-file; supports `--single-assembly` and `--watch` +- Backend: `/image/resize` + +**Shared flags** + +- Uses the shared file input and output flags listed above. 
+- Also supports the shared base processing flags, watch flags, bundling flags listed above. + +**Command options** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--format` | `string` | no | `value` | The output format for the modified image. Some of the most important available formats are "jpg", "png", "gif", and "tiff". For a complete list of all formats that we can write… | +| `--width` | `number` | no | `1` | Width of the result in pixels. If not specified, will default to the width of the original. | +| `--height` | `number` | no | `1` | Height of the new image, in pixels. If not specified, will default to the height of the input image. | +| `--resize-strategy` | `string` | no | `crop` | See the list of available resize strategies. | +| `--zoom` | `boolean` | no | `true` | If this is set to false, smaller images will not be stretched to the desired width and height. | +| `--crop` | `auto` | no | `value` | Specify an object containing coordinates for the top left and bottom right corners of the rectangle to be cropped from the original image(s). | +| `--gravity` | `string` | no | `bottom` | The direction from which the image is to be cropped, when "resize_strategy" is set to "crop", but no crop coordinates are defined. | +| `--strip` | `boolean` | no | `true` | Strips all metadata from the image. This is useful to keep thumbnails as small as possible. | +| `--alpha` | `string` | no | `Activate` | Gives control of the alpha/matte channel of an image. | +| `--preclip-alpha` | `string` | no | `Activate` | Gives control of the alpha/matte channel of an image before applying the clipping path via clip: true. | +| `--flatten` | `boolean` | no | `true` | Flattens all layers onto the specified background to achieve better results from transparent formats to non-transparent formats, as explained in the ImageMagick documentation. 
| +| `--correct-gamma` | `boolean` | no | `true` | Prevents gamma errors common in many image scaling algorithms. | +| `--quality` | `number` | no | `1` | Controls the image compression for JPG and PNG images. Please also take a look at 🤖/image/optimize. | +| `--adaptive-filtering` | `boolean` | no | `true` | Controls the image compression for PNG images. Setting to true results in smaller file size, while increasing processing time. It is encouraged to keep this option disabled. | +| `--background` | `string` | no | `transparent` | Either the hexadecimal code or name of the color used to fill the background (used for the pad resize strategy). | +| `--frame` | `number` | no | `1` | Use this parameter when dealing with animated GIF files to specify which frame of the GIF is used for the operation. | +| `--colorspace` | `string` | no | `CMY` | Sets the image colorspace. For details about the available values, see the ImageMagick documentation. Please note that if you were using "RGB", we recommend using "sRGB" instead… | +| `--type` | `string` | no | `Bilevel` | Sets the image color type. For details about the available values, see the ImageMagick documentation. If you're using colorspace, ImageMagick might try to find the most efficient… | +| `--sepia` | `number` | no | `1` | Applies a sepia tone effect in percent. | +| `--rotation` | `auto` | no | `auto` | Determines whether the image should be rotated. Use any number to specify the rotation angle in degrees (e.g., 90, 180, 270, 360, or precise values like 2.9). Use the value true… | +| `--compress` | `string` | no | `BZip` | Specifies pixel compression for when the image is written. Compression is disabled by default. Please also take a look at 🤖/image/optimize. | +| `--blur` | `string` | no | `value` | Specifies gaussian blur, using a value with the form {radius}x{sigma}. | +| `--blur-regions` | `json` | no | `value` | Specifies an array of ellipse objects that should be blurred on the image. 
| +| `--brightness` | `number` | no | `1` | Increases or decreases the brightness of the image by using a multiplier. For example 1.5 would increase the brightness by 50%, and 0.75 would decrease the brightness by 25%. | +| `--saturation` | `number` | no | `1` | Increases or decreases the saturation of the image by using a multiplier. For example 1.5 would increase the saturation by 50%, and 0.75 would decrease the saturation by 25%. | +| `--hue` | `number` | no | `1` | Changes the hue by rotating the color of the image. The value 100 would produce no change whereas 0 and 200 will negate the colors in the image. | +| `--contrast` | `number` | no | `1` | Adjusts the contrast of the image. A value of 1 produces no change. Values below 1 decrease contrast (with 0 being minimum contrast), and values above 1 increase contrast (with 2… | +| `--watermark-url` | `string` | no | `value` | A URL indicating a PNG image to be overlaid above this image. | +| `--watermark-position` | `string[]` | no | `bottom` | The position at which the watermark is placed. The available options are "center", "top", "bottom", "left", and "right". You can also combine options, such as "bottom-right". An… | +| `--watermark-x-offset` | `number` | no | `1` | The x-offset in number of pixels at which the watermark will be placed in relation to the position it has due to watermark_position. | +| `--watermark-y-offset` | `number` | no | `1` | The y-offset in number of pixels at which the watermark will be placed in relation to the position it has due to watermark_position. | +| `--watermark-size` | `string` | no | `value` | The size of the watermark, as a percentage. For example, a value of "50%" means that size of the watermark will be 50% of the size of image on which it is placed. The exact… | +| `--watermark-resize-strategy` | `string` | no | `area` | Available values are "fit", "min_fit", "stretch" and "area". 
| +| `--watermark-opacity` | `number` | no | `1` | The opacity of the watermark, where 0.0 is fully transparent and 1.0 is fully opaque. | +| `--watermark-repeat-x` | `boolean` | no | `true` | When set to true, the watermark will be repeated horizontally across the entire width of the image. | +| `--watermark-repeat-y` | `boolean` | no | `true` | When set to true, the watermark will be repeated vertically across the entire height of the image. | +| `--text` | `json` | no | `value` | Text overlays to be applied to the image. Can be either a single text object or an array of text objects. Each text object contains text rules. The following text parameters are… | +| `--progressive` | `boolean` | no | `true` | Interlaces the image if set to true, which makes the image load progressively in browsers. | +| `--transparent` | `string` | no | `transparent` | Make this color transparent within the image. Example: "255,255,255". | +| `--trim-whitespace` | `boolean` | no | `true` | This determines if additional whitespace around the image should first be trimmed away. | +| `--clip` | `auto` | no | `value` | Apply the clipping path to other operations in the resize job, if one is present. | +| `--negate` | `boolean` | no | `true` | Replace each pixel with its complementary color, effectively negating the image. Especially useful when testing clipping. | +| `--density` | `string` | no | `value` | While in-memory quality and file format depth specifies the color resolution, the density of an image is the spatial (space) resolution of the image. | +| `--monochrome` | `boolean` | no | `true` | Transform the image to black and white. This is a shortcut for setting the colorspace to Gray and type to Bilevel. | +| `--shave` | `auto` | no | `value` | Shave pixels from the image edges. The value should be in the format width or widthxheight to specify the number of pixels to remove from each side. 
| + +**Examples** + +```bash +transloadit image resize --input input.png --out output.png +``` + +#### `document convert` + +Convert documents into different formats + +Runs `/document/convert` on each input file and writes the result to `--out`. + +**Usage** + +```bash +npx transloadit document convert --input [options] +``` + +**Quick facts** + +- Input: file, dir, URL, base64 +- Output: file +- Execution: per-file; supports `--single-assembly` and `--watch` +- Backend: `/document/convert` + +**Shared flags** + +- Uses the shared file input and output flags listed above. +- Also supports the shared base processing flags, watch flags, bundling flags listed above. + +**Command options** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--format` | `string` | yes | `pdf` | The desired format for document conversion. | +| `--markdown-format` | `string` | no | `commonmark` | Markdown can be represented in several variants, so when using this Robot to transform Markdown into HTML please specify which revision is being used. | +| `--markdown-theme` | `string` | no | `bare` | This parameter overhauls your Markdown files styling based on several canned presets. | +| `--pdf-margin` | `string` | no | `value` | PDF Paper margins, separated by , and with units. We support the following unit values: px, in, cm, mm. Currently this parameter is only supported when converting from html. | +| `--pdf-print-background` | `boolean` | no | `true` | Print PDF background graphics. Currently this parameter is only supported when converting from html. | +| `--pdf-format` | `string` | no | `A0` | PDF paper format. Currently this parameter is only supported when converting from html. | +| `--pdf-display-header-footer` | `boolean` | no | `true` | Display PDF header and footer. Currently this parameter is only supported when converting from html. | +| `--pdf-header-template` | `string` | no | `value` | HTML template for the PDF print header. 
Should be valid HTML markup with following classes used to inject printing values into them: - date formatted print date - title document… | +| `--pdf-footer-template` | `string` | no | `value` | HTML template for the PDF print footer. Should use the same format as the pdf_header_template. Currently this parameter is only supported when converting from html, and requires… | + +**Examples** + +```bash +transloadit document convert --input input.pdf --format pdf --out output.pdf +``` + +#### `document optimize` + +Reduce PDF file size + +Runs `/document/optimize` on each input file and writes the result to `--out`. + +**Usage** + +```bash +npx transloadit document optimize --input [options] +``` + +**Quick facts** + +- Input: file, dir, URL, base64 +- Output: file +- Execution: per-file; supports `--single-assembly` and `--watch` +- Backend: `/document/optimize` + +**Shared flags** + +- Uses the shared file input and output flags listed above. +- Also supports the shared base processing flags, watch flags, bundling flags listed above. + +**Command options** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--preset` | `string` | no | `screen` | The quality preset to use for optimization. Each preset provides a different balance between file size and quality: - screen - Lowest quality, smallest file size. Best for screen… | +| `--image-dpi` | `number` | no | `1` | Target DPI (dots per inch) for embedded images. When specified, this overrides the DPI setting from the preset. Higher DPI values result in better image quality but larger file… | +| `--compress-fonts` | `boolean` | no | `true` | Whether to compress embedded fonts. When enabled, fonts are compressed to reduce file size. | +| `--subset-fonts` | `boolean` | no | `true` | Whether to subset embedded fonts, keeping only the glyphs that are actually used in the document. 
| +| `--remove-metadata` | `boolean` | no | `true` | Whether to strip document metadata (title, author, keywords, etc.) from the PDF. This can provide a small reduction in file size and may be useful for privacy. | +| `--linearize` | `boolean` | no | `true` | Whether to linearize (optimize for Fast Web View) the output PDF. | +| `--compatibility` | `string` | no | `1.4` | The PDF version compatibility level. Lower versions have broader compatibility but fewer features. Higher versions support more advanced features but may not open in older PDF… | + +**Examples** + +```bash +transloadit document optimize --input input.pdf --out output.pdf +``` + +#### `document auto-rotate` + +Auto-rotate documents to the correct orientation + +Runs `/document/autorotate` on each input file and writes the result to `--out`. + +**Usage** + +```bash +npx transloadit document auto-rotate --input [options] +``` + +**Quick facts** + +- Input: file, dir, URL, base64 +- Output: file +- Execution: per-file; supports `--single-assembly` and `--watch` +- Backend: `/document/autorotate` + +**Shared flags** + +- Uses the shared file input and output flags listed above. +- Also supports the shared base processing flags, watch flags, bundling flags listed above. + +**Examples** + +```bash +transloadit document auto-rotate --input input.pdf --out output.pdf +``` + +#### `document thumbs` + +Extract thumbnail images from documents + +Runs `/document/thumbs` on each input file and writes the results to `--out`. + +**Usage** + +```bash +npx transloadit document thumbs --input [options] +``` + +**Quick facts** + +- Input: file, dir, URL, base64 +- Output: directory +- Execution: per-file; supports `--single-assembly` and `--watch` +- Backend: `/document/thumbs` + +**Shared flags** + +- Uses the shared file input and output flags listed above. +- Also supports the shared base processing flags, watch flags, bundling flags listed above. 
+ +**Command options** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--page` | `number` | no | `1` | The PDF page that you want to convert to an image. By default the value is null which means that all pages will be converted into images. | +| `--format` | `string` | no | `jpg` | The format of the extracted image(s). If you specify the value "gif", then an animated gif cycling through all pages is created. Please check out this demo to learn more about… | +| `--delay` | `number` | no | `1` | If your output format is "gif" then this parameter sets the number of 100th seconds to pass before the next frame is shown in the animation. | +| `--width` | `number` | no | `1` | Width of the new image, in pixels. If not specified, will default to the width of the input image | +| `--height` | `number` | no | `1` | Height of the new image, in pixels. If not specified, will default to the height of the input image | +| `--resize-strategy` | `string` | no | `crop` | One of the available resize strategies. | +| `--background` | `string` | no | `value` | Either the hexadecimal code or name of the color used to fill the background (only used for the pad resize strategy). | +| `--alpha` | `string` | no | `Remove` | Change how the alpha channel of the resulting image should work. | +| `--density` | `string` | no | `value` | While in-memory quality and file format depth specifies the color resolution, the density of an image is the spatial (space) resolution of the image. | +| `--antialiasing` | `boolean` | no | `true` | Controls whether or not antialiasing is used to remove jagged edges from text or images in a document. | +| `--colorspace` | `string` | no | `CMY` | Sets the image colorspace. For details about the available values, see the ImageMagick documentation. 
Please note that if you were using "RGB", we recommend using "sRGB".… | +| `--trim-whitespace` | `boolean` | no | `true` | This determines if additional whitespace around the PDF should first be trimmed away before it is converted to an image. | +| `--pdf-use-cropbox` | `boolean` | no | `true` | Some PDF documents lie about their dimensions. For instance they'll say they are landscape, but when opened in decent Desktop readers, it's really in portrait mode. This can… | +| `--turbo` | `boolean` | no | `true` | If you set this to false, the robot will not emit files as they become available. | + +**Examples** + +```bash +transloadit document thumbs --input input.pdf --out output/ +``` + +#### `audio waveform` + +Generate waveform images from audio + +Runs `/audio/waveform` on each input file and writes the result to `--out`. + +**Usage** + +```bash +npx transloadit audio waveform --input [options] +``` + +**Quick facts** + +- Input: file, dir, URL, base64 +- Output: file +- Execution: per-file; supports `--single-assembly` and `--watch` +- Backend: `/audio/waveform` + +**Shared flags** + +- Uses the shared file input and output flags listed above. +- Also supports the shared base processing flags, watch flags, bundling flags listed above. + +**Command options** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--ffmpeg` | `json` | no | `value` | A parameter object to be passed to FFmpeg. If a preset is used, the options specified are merged on top of the ones from the preset. For available options, see the FFmpeg… | +| `--format` | `string` | no | `image` | The format of the result file. Can be "image" or "json". If "image" is supplied, a PNG image will be created, otherwise a JSON file. | +| `--width` | `number` | no | `1` | The width of the resulting image if the format "image" was selected. | +| `--height` | `number` | no | `1` | The height of the resulting image if the format "image" was selected. 
| +| `--antialiasing` | `auto` | no | `0` | Either a value of 0 or 1, or true/false, corresponding to if you want to enable antialiasing to achieve smoother edges in the waveform graph or not. | +| `--background-color` | `string` | no | `value` | The background color of the resulting image in the "rrggbbaa" format (red, green, blue, alpha), if the format "image" was selected. | +| `--center-color` | `string` | no | `value` | The color used in the center of the gradient. The format is "rrggbbaa" (red, green, blue, alpha). | +| `--outer-color` | `string` | no | `value` | The color used in the outer parts of the gradient. The format is "rrggbbaa" (red, green, blue, alpha). | +| `--style` | `string` | no | `v0` | Waveform style version. - "v0": Legacy waveform generation (default). - "v1": Advanced waveform generation with additional parameters. For backwards compatibility, numeric values… | +| `--split-channels` | `boolean` | no | `true` | Available when style is "v1". If set to true, outputs multi-channel waveform data or image files, one per channel. | +| `--zoom` | `number` | no | `1` | Available when style is "v1". Zoom level in samples per pixel. This parameter cannot be used together with pixels_per_second. | +| `--pixels-per-second` | `number` | no | `1` | Available when style is "v1". Zoom level in pixels per second. This parameter cannot be used together with zoom. | +| `--bits` | `number` | no | `8` | Available when style is "v1". Bit depth for waveform data. Can be 8 or 16. | +| `--start` | `number` | no | `1` | Available when style is "v1". Start time in seconds. | +| `--end` | `number` | no | `1` | Available when style is "v1". End time in seconds (0 means end of audio). | +| `--colors` | `string` | no | `audition` | Available when style is "v1". Color scheme to use. Can be "audition" or "audacity". | +| `--border-color` | `string` | no | `value` | Available when style is "v1". Border color in "rrggbbaa" format. 
| +| `--waveform-style` | `string` | no | `normal` | Available when style is "v1". Waveform style. Can be "normal" or "bars". | +| `--bar-width` | `number` | no | `1` | Available when style is "v1". Width of bars in pixels when waveform_style is "bars". | +| `--bar-gap` | `number` | no | `1` | Available when style is "v1". Gap between bars in pixels when waveform_style is "bars". | +| `--bar-style` | `string` | no | `square` | Available when style is "v1". Bar style when waveform_style is "bars". | +| `--axis-label-color` | `string` | no | `value` | Available when style is "v1". Color for axis labels in "rrggbbaa" format. | +| `--no-axis-labels` | `boolean` | no | `true` | Available when style is "v1". If set to true, renders waveform image without axis labels. | +| `--with-axis-labels` | `boolean` | no | `true` | Available when style is "v1". If set to true, renders waveform image with axis labels. | +| `--amplitude-scale` | `number` | no | `1` | Available when style is "v1". Amplitude scale factor. | +| `--compression` | `number` | no | `1` | Available when style is "v1". PNG compression level: 0 (none) to 9 (best), or -1 (default). Only applicable when format is "image". | + +**Examples** + +```bash +transloadit audio waveform --input input.mp3 --out output.png +``` + +#### `text speak` + +Speak text + +Runs `/text/speak` on each input file and writes the result to `--out`. + +**Usage** + +```bash +npx transloadit text speak --input [options] +``` + +**Quick facts** + +- Input: file, dir, URL, base64 +- Output: file +- Execution: per-file; supports `--single-assembly` and `--watch` +- Backend: `/text/speak` + +**Shared flags** + +- Uses the shared file input and output flags listed above. +- Also supports the shared base processing flags, watch flags, bundling flags listed above. 
+ +**Command options** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--prompt` | `string` | no | `"A red bicycle in a studio"` | Which text to speak. You can also set this to null and supply an input text file. | +| `--provider` | `string` | yes | `aws` | Which AI provider to leverage. Transloadit outsources this task and abstracts the interface so you can expect the same data structures, but different latencies and information… | +| `--target-language` | `string` | no | `en-US` | The written language of the document. This will also be the language of the spoken text. The language should be specified in the BCP-47 format, such as "en-GB", "de-DE" or… | +| `--voice` | `string` | no | `female-1` | The gender to be used for voice synthesis. Please consult the list of supported languages and voices. | +| `--ssml` | `boolean` | no | `true` | Supply Speech Synthesis Markup Language instead of raw text, in order to gain more control over how your text is voiced, including rests and pronunciations. | + +**Examples** + +```bash +transloadit text speak --input input.pdf --provider aws --out output.mp3 +``` + +#### `video thumbs` + +Extract thumbnails from videos + +Runs `/video/thumbs` on each input file and writes the results to `--out`. + +**Usage** + +```bash +npx transloadit video thumbs --input [options] +``` + +**Quick facts** + +- Input: file, dir, URL, base64 +- Output: directory +- Execution: per-file; supports `--single-assembly` and `--watch` +- Backend: `/video/thumbs` + +**Shared flags** + +- Uses the shared file input and output flags listed above. +- Also supports the shared base processing flags, watch flags, bundling flags listed above. + +**Command options** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--ffmpeg` | `json` | no | `value` | A parameter object to be passed to FFmpeg. 
If a preset is used, the options specified are merged on top of the ones from the preset. For available options, see the FFmpeg… | +| `--count` | `number` | no | `1` | The number of thumbnails to be extracted. As some videos have incorrect durations, the actual number of thumbnails generated may be less in rare cases. The maximum number of… | +| `--offsets` | `auto` | no | `value` | An array of offsets representing seconds of the file duration, such as [ 2, 45, 120 ]. | +| `--format` | `string` | no | `jpg` | The format of the extracted thumbnail. Supported values are "jpg", "jpeg" and "png". Even if you specify the format to be "jpeg" the resulting thumbnails will have a "jpg" file… | +| `--width` | `number` | no | `1` | The width of the thumbnail, in pixels. Defaults to the original width of the video. | +| `--height` | `number` | no | `1` | The height of the thumbnail, in pixels. Defaults to the original height of the video. | +| `--resize-strategy` | `string` | no | `crop` | One of the available resize strategies. | +| `--background` | `string` | no | `value` | The background color of the resulting thumbnails in the "rrggbbaa" format (red, green, blue, alpha) when used with the "pad" resize strategy. The default color is black. | +| `--rotate` | `number` | no | `0` | Forces the video to be rotated by the specified degree integer. | +| `--input-codec` | `string` | no | `value` | Specifies the input codec to use when decoding the video. This is useful for videos with special codecs that require specific decoders. | + +**Examples** + +```bash +transloadit video thumbs --input input.mp4 --out output/ +``` + +#### `video encode-hls` + +Run builtin/encode-hls-video@latest + +Runs the `builtin/encode-hls-video@latest` template and writes the outputs to `--out`. 
+ +**Usage** + +```bash +npx transloadit video encode-hls --input [options] +``` + +**Quick facts** + +- Input: file, dir, URL, base64 +- Output: directory +- Execution: per-file; supports `--single-assembly` and `--watch` +- Backend: `builtin/encode-hls-video@latest` + +**Shared flags** + +- Uses the shared file input and output flags listed above. +- Also supports the shared base processing flags, watch flags, bundling flags listed above. + +**Examples** + +```bash +transloadit video encode-hls --input input.mp4 --out output/ +``` + +#### `image describe` + +Describe images as labels or publishable text fields + +Generates image labels through `/image/describe`, or structured altText/title/caption/description through `/ai/chat`, then writes the JSON result to `--out`. + +**Usage** + +```bash +npx transloadit image describe --input [options] +``` + +**Quick facts** + +- Input: file, dir, URL, base64 +- Output: file +- Execution: per-file; supports `--watch` +- Backend: semantic alias `image-describe` + +**Shared flags** + +- Uses the shared file input and output flags listed above. +- Also supports the shared base processing flags, watch flags listed above. 
+ +**Command options** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--fields` | `string[]` | no | — | Describe output fields to generate, for example labels or altText,title,caption,description | +| `--for` | `string` | no | — | Use a named output profile, currently: wordpress | +| `--model` | `string` | no | — | Model to use for generated text fields (default: anthropic/claude-4-sonnet-20250514) | + +**Examples** + +```bash +# Describe an image as labels +transloadit image describe --input hero.jpg --out labels.json +# Generate WordPress-ready fields +transloadit image describe --input hero.jpg --for wordpress --out fields.json +# Request a custom field set +transloadit image describe --input hero.jpg --fields altText,title,caption --out fields.json +``` + +#### `markdown pdf` + +Render Markdown files as PDFs + +Runs `/document/convert` with `format: pdf`, letting the backend render Markdown and preserve features such as internal heading links in the generated PDF. + +**Usage** + +```bash +npx transloadit markdown pdf --input [options] +``` + +**Quick facts** + +- Input: file, dir, URL, base64 +- Output: file +- Execution: per-file; supports `--watch` +- Backend: semantic alias `markdown-pdf` + +**Shared flags** + +- Uses the shared file input and output flags listed above. +- Also supports the shared base processing flags, watch flags listed above. 
+ +**Command options** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--markdown-format` | `string` | no | — | Markdown variant to parse, either commonmark or gfm | +| `--markdown-theme` | `string` | no | — | Markdown theme to render, either github or bare | + +**Examples** + +```bash +# Render a Markdown file as a PDF file +transloadit markdown pdf --input README.md --out README.pdf +# Print a temporary result URL without downloading locally +transloadit markdown pdf --input README.md --print-urls +``` + +#### `markdown docx` + +Render Markdown files as DOCX documents + +Runs `/document/convert` with `format: docx`, letting the backend render Markdown and convert it into a Word document. + +**Usage** + +```bash +npx transloadit markdown docx --input [options] +``` + +**Quick facts** + +- Input: file, dir, URL, base64 +- Output: file +- Execution: per-file; supports `--watch` +- Backend: semantic alias `markdown-docx` + +**Shared flags** + +- Uses the shared file input and output flags listed above. +- Also supports the shared base processing flags, watch flags listed above. + +**Command options** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--markdown-format` | `string` | no | — | Markdown variant to parse, either commonmark or gfm | +| `--markdown-theme` | `string` | no | — | Markdown theme to render, either github or bare | + +**Examples** + +```bash +# Render a Markdown file as a DOCX file +transloadit markdown docx --input README.md --out README.docx +# Print a temporary result URL without downloading locally +transloadit markdown docx --input README.md --print-urls +``` + +#### `file compress` + +Compress files + +Runs `/file/compress` for the provided inputs and writes the result to `--out`. 
+ +**Usage** + +```bash +npx transloadit file compress --input [options] +``` + +**Quick facts** + +- Input: file, dir, URL, base64 +- Output: file +- Execution: single assembly +- Backend: `/file/compress` + +**Shared flags** + +- Uses the shared file input and output flags listed above. +- Also supports the shared base processing flags listed above. + +**Command options** + +| Flag | Type | Required | Example | Description | +| --- | --- | --- | --- | --- | +| `--format` | `string` | no | `zip` | The format of the archive to be created. Supported values are "tar" and "zip". Note that "tar" without setting gzip to true results in an archive that's not compressed in any way. | +| `--gzip` | `boolean` | no | `true` | Determines if the result archive should also be gzipped. Gzip compression is only applied if you use the "tar" format. | +| `--password` | `string` | no | `value` | This allows you to encrypt all archive contents with a password and thereby protect it against unauthorized use. | +| `--compression-level` | `number` | no | `1` | Determines how fiercely to try to compress the archive. -0 is compressionless, which is suitable for media that is already compressed. -1 is fastest with lowest compression. -9… | +| `--file-layout` | `string` | no | `advanced` | Determines if the result archive should contain all files in one directory (value for this is "simple") or in subfolders according to the explanation below (value for this is… | +| `--archive-name` | `string` | no | `value` | The name of the archive file to be created (without the file extension). | + +**Examples** + +```bash +transloadit file compress --input input.file --out output.file +``` + +#### `file decompress` + +Decompress archives + +Runs `/file/decompress` on each input file and writes the results to `--out`. 
+ +**Usage** + +```bash +npx transloadit file decompress --input [options] +``` + +**Quick facts** + +- Input: file, dir, URL, base64 +- Output: directory +- Execution: per-file; supports `--single-assembly` and `--watch` +- Backend: `/file/decompress` + +**Shared flags** + +- Uses the shared file input and output flags listed above. +- Also supports the shared base processing flags, watch flags, bundling flags listed above. + +**Examples** + +```bash +transloadit file decompress --input input.file --out output/ +``` + + + +For full control, create Assemblies directly using Assembly Instructions (steps) or Templates: + +```bash +# Process a file using a steps file +npx transloadit assemblies create --steps steps.json --input image.jpg --output result.jpg + +# Process using a Template +npx transloadit assemblies create --template YOUR_TEMPLATE_ID --input image.jpg --output result.jpg + +# Process with custom fields +npx transloadit assemblies create --template YOUR_TEMPLATE_ID --field size=100 --input image.jpg --output thumb.jpg + +# Process a directory of files +npx transloadit assemblies create --template YOUR_TEMPLATE_ID --input images/ --output thumbs/ + +# Process recursively with file watching +npx transloadit assemblies create --template YOUR_TEMPLATE_ID --input images/ --output thumbs/ --recursive --watch + +# Process multiple files in a single assembly +npx transloadit assemblies create --template YOUR_TEMPLATE_ID --input file1.jpg --input file2.jpg --output results/ --single-assembly + +# Limit concurrent processing (default: 5) +npx transloadit assemblies create --template YOUR_TEMPLATE_ID --input images/ --output thumbs/ --concurrency 2 +``` + +### Managing Assemblies + +```bash +# List recent assemblies +npx transloadit assemblies list + +# List assemblies with filters +npx transloadit assemblies list --after 2024-01-01 --before 2024-12-31 + +# Get assembly status +npx transloadit assemblies get ASSEMBLY_ID + +# Cancel an assembly +npx transloadit 
assemblies delete ASSEMBLY_ID + +# Replay an assembly (re-run with original instructions) +npx transloadit assemblies replay ASSEMBLY_ID + +# Replay with different steps +npx transloadit assemblies replay --steps new-steps.json ASSEMBLY_ID + +# Replay using latest template version +npx transloadit assemblies replay --reparse-template ASSEMBLY_ID +``` + +### Linting Assembly Instructions + +Lint Assembly Instructions locally using the same linter as the API. + +```bash +# From a JSON file (full instructions or steps-only) +npx transloadit assemblies lint --steps steps.json + +# From stdin +cat steps.json | npx transloadit assemblies lint + +# Merge template content before linting +npx transloadit assemblies lint --template TEMPLATE_ID --steps steps.json + +# Treat warnings as fatal; apply fixes (overwrites files / stdout for stdin) +npx transloadit assemblies lint --fatal warning --fix --steps steps.json +``` + +When both `--template` and steps input are provided, Transloadit merges the template content with +the provided steps before linting, matching the API's runtime behavior. If the template sets +`allow_steps_override=false`, providing steps will fail with `TEMPLATE_DENIES_STEPS_OVERRIDE`. + +## SDK Helpers + +### prepareInputFiles + +`prepareInputFiles()` converts mixed file inputs into `files`, `uploads`, and optional +`/http/import` steps so you can pass them directly into `createAssembly()` or +`resumeAssemblyUploads()`. + +```ts +import { prepareInputFiles } from '@transloadit/node' + +const prepared = await prepareInputFiles({ + inputFiles: [ + { kind: 'path', field: 'video', path: '/tmp/video.mp4' }, + { kind: 'base64', field: 'logo', filename: 'logo.png', base64: '...' 
}, + { kind: 'url', field: 'remote', url: 'https://example.com/file.jpg' }, + ], + params: { + steps: { + ':original': { robot: '/upload/handle' }, + encode: { robot: '/video/encode', use: ':original' }, + }, + }, + base64Strategy: 'tempfile', + urlStrategy: 'import-if-present', + maxBase64Bytes: 512_000, + allowPrivateUrls: true, +}) + +await client.createAssembly({ + params: prepared.params, + files: prepared.files, + uploads: prepared.uploads, +}) +``` + +Options: + +- `inputFiles` – Array of `{ kind, field, ... }` entries for `path`, `base64`, or `url` inputs. +- `params` – Assembly instructions; steps will be extended when URL imports are injected. +- `fields` – Extra form fields to merge into `params.fields`. +- `base64Strategy` – `'buffer'` (default) or `'tempfile'` for base64 inputs. +- `urlStrategy` – `'import'`, `'download'`, or `'import-if-present'` (default `'import'`). +- `maxBase64Bytes` – Optional size cap (decoded bytes). Overages throw before decoding. +- `allowPrivateUrls` – Allow downloading private/loopback URLs when using `urlStrategy: 'download'` + (default `true`). Hosted deployments should disable this. +- `tempDir` – Optional temp directory base when `base64Strategy: 'tempfile'`. 
+ +### Managing Templates + +```bash +# List all templates +npx transloadit templates list + +# Get template content +npx transloadit templates get TEMPLATE_ID + +# Create a template from a JSON file +npx transloadit templates create my-template template.json + +# Modify a template +npx transloadit templates modify TEMPLATE_ID template.json + +# Rename a template +npx transloadit templates modify TEMPLATE_ID --name new-name + +# Delete a template +npx transloadit templates delete TEMPLATE_ID + +# Sync local template files with Transloadit (bidirectional) +npx transloadit templates sync templates/*.json +npx transloadit templates sync --recursive templates/ +``` + +### Billing + +```bash +# Get bill for a month +npx transloadit bills get 2024-01 + +# Get detailed bill as JSON +npx transloadit bills get 2024-01 --json +``` + +### Assembly Notifications + +```bash +# Replay a notification +npx transloadit assembly-notifications replay ASSEMBLY_ID + +# Replay to a different URL +npx transloadit assembly-notifications replay --notify-url https://example.com/hook ASSEMBLY_ID +``` + +### Signature Generation + +```bash +# Generate a signature for assembly params +echo '{"steps":{}}' | npx transloadit auth signature + +# Generate with specific algorithm +echo '{"steps":{}}' | npx transloadit auth signature --algorithm sha256 + +# Generate a signed Smart CDN URL +echo '{"workspace":"my-workspace","template":"my-template","input":"image.jpg"}' | npx transloadit auth smart-cdn +``` + +### CLI Options + +All commands support these common options: + +- `--json, -j` - Output results as JSON (useful for scripting) +- `--log-level, -l` - Set log verbosity level by name or number (default: notice) +- `--endpoint` - Custom API endpoint URL (or set `TRANSLOADIT_ENDPOINT` env var) +- `--help, -h` - Show help for a command + +The `assemblies create` command additionally supports: + +- `--single-assembly` - Pass all input files to a single assembly instead of one assembly per file + 
+#### Log Levels + +The CLI uses [syslog severity levels](https://en.wikipedia.org/wiki/Syslog#Severity_level). Lower = more severe, higher = more verbose: + +| Level | Value | Description | +| -------- | ----- | ------------------------------------- | +| `err` | 3 | Error conditions | +| `warn` | 4 | Warning conditions | +| `notice` | 5 | Normal but significant **(default)** | +| `info` | 6 | Informational messages | +| `debug` | 7 | Debug-level messages | +| `trace` | 8 | Most verbose/detailed | + +You can use either the level name or its numeric value: + +```bash +# Show only errors and warnings +npx transloadit assemblies list -l warn +npx transloadit assemblies list -l 4 + +# Show debug output +npx transloadit assemblies list -l debug +npx transloadit assemblies list -l 7 +``` + +## SDK Usage + +The following code will upload an image and resize it to a thumbnail: + +```javascript +import { Transloadit } from '@transloadit/node' + +const transloadit = new Transloadit({ + authKey: 'YOUR_TRANSLOADIT_KEY', + authSecret: 'YOUR_TRANSLOADIT_SECRET', +}) + +try { + const options = { + files: { + file1: '/PATH/TO/FILE.jpg', + }, + params: { + steps: { + // You can have many Steps. In this case we will just resize any inputs (:original) + resize: { + use: ':original', + robot: '/image/resize', + result: true, + width: 75, + height: 75, + }, + }, + // OR if you already created a template, you can use it instead of "steps": + // template_id: 'YOUR_TEMPLATE_ID', + }, + waitForCompletion: true, // Wait for the Assembly (job) to finish executing before returning + } + + const status = await transloadit.createAssembly(options) + + if (status.results.resize) { + console.log('✅ Success - Your resized image:', status.results.resize[0].ssl_url) + } else { + console.log("❌ The Assembly didn't produce any output. 
Make sure you used a valid image file") + } +} catch (err) { + console.error('❌ Unable to process Assembly.', err) + if (err instanceof ApiError && err.assemblyId) { + console.error(`💡 More info: https://transloadit.com/assemblies/${err.assemblyId}`) + } +} +``` + +You can find [details about your executed Assemblies here](https://transloadit.com/assemblies). + +### Resuming interrupted uploads + +If an upload was interrupted, you can resume it by providing the original `assemblyUrl` and the +same input mapping. Resume relies on matching `fieldname`, `filename`, and `size`, so keep input +names stable and pass the same files. Only path-based inputs resume; Buffer/string/stream uploads +start a new tus upload automatically. + +You can pass the same upload and progress options as `createAssembly` (such as `chunkSize`, +`uploadConcurrency`, `uploadBehavior`, `waitForCompletion`, `timeout`, `onUploadProgress`, and +`onAssemblyProgress`). +When `waitForCompletion` is `true`, the SDK will poll and resolve once the Assembly is finished. 
+ +```javascript +const status = await transloadit.resumeAssemblyUploads({ + assemblyUrl: 'https://api2.transloadit.com/assemblies/ASSEMBLY_ID', + files: { + file1: '/PATH/TO/FILE.jpg', + file2: '/PATH/TO/FILE2.jpg', + }, + uploadConcurrency: 2, +}) +``` + +## Examples + +- [Upload and resize image](https://github.com/transloadit/node-sdk/blob/main/examples/resize_an_image.ts) +- [Upload image and convert to WebP](https://github.com/transloadit/node-sdk/blob/main/examples/convert_to_webp.ts) +- [Rasterize SVG to PNG](https://github.com/transloadit/node-sdk/blob/main/examples/rasterize_svg_to_png.ts) +- [Crop a face out of an image and download the result](https://github.com/transloadit/node-sdk/blob/main/examples/face_detect_download.ts) +- [Retry example](https://github.com/transloadit/node-sdk/blob/main/examples/retry.ts) +- [Calculate total costs (GB usage)](https://github.com/transloadit/node-sdk/blob/main/examples/fetch_costs_of_all_assemblies_in_timeframe.ts) +- [Templates CRUD](https://github.com/transloadit/node-sdk/blob/main/examples/template_api.ts) +- [Template Credentials CRUD](https://github.com/transloadit/node-sdk/blob/main/examples/credentials.ts) + +For more fully working examples take a look at [`examples/`](https://github.com/transloadit/node-sdk/blob/main/examples/). + +For more example use cases and information about the available robots and their parameters, check out the [Transloadit website](https://transloadit.com/). + +## API + +These are the public methods on the `Transloadit` object and their descriptions. The methods are based on the [Transloadit API](https://transloadit.com/docs/api/). 
+ +Table of contents: + +- [Main](#main) +- [Assemblies](#assemblies) +- [Assembly notifications](#assembly-notifications) +- [Templates](#templates) +- [Template Credentials](#template-credentials) +- [Errors](#errors) +- [Rate limiting & auto retry](#rate-limiting--auto-retry) + +### Main + +#### constructor(options) + +Returns a new instance of the client. + +The `options` object can contain the following keys: + +- `authKey` **(required)** - see [requirements](#requirements) +- `authSecret` **(required)** - see [requirements](#requirements) +- `endpoint` (default `'https://api2.transloadit.com'`) +- `maxRetries` (default `5`) - see [Rate limiting & auto retry](#rate-limiting--auto-retry) +- `gotRetry` (default `0`) - see [Rate limiting & auto retry](#rate-limiting--auto-retry) +- `timeout` (default `60000`: 1 minute) - the timeout (in milliseconds) for all requests (except `createAssembly`) +- `validateResponses` (default `false`) + +### Assemblies + +#### async createAssembly(options) + +Creates a new Assembly on Transloadit and optionally uploads the specified `files` and `uploads`. + +You can provide the following keys inside the `options` object: + +- `params` **(required)** - An object containing keys defining the Assembly's behavior with the following keys: (See also [API doc](https://transloadit.com/docs/api/assemblies-post/) and [examples](#examples)) + - `steps` - Assembly instructions - See [Transloadit docs](https://transloadit.com/docs/topics/assembly-instructions/) and [demos](https://transloadit.com/demos/) for inspiration. + - `template_id` - The ID of the Template that contains your Assembly Instructions. **One of either `steps` or `template_id` is required.** If you specify both, then [any Steps will overrule the template](https://transloadit.com/docs/topics/templates/#overruling-templates-at-runtime). 
+ - `fields` - An object of form fields to add to the request, to make use of in the Assembly instructions via [Assembly variables](https://transloadit.com/docs#assembly-variables). + - `notify_url` - Transloadit can send a Pingback to your server when the Assembly is completed. We'll send the Assembly Status as a JSON encoded string inside a transloadit field in a multipart POST request to the URL supplied here. +- `files` - An object (key-value pairs) containing one or more file paths to upload and use in your Assembly. The _key_ is the _field name_ and the _value_ is the path to the file to be uploaded. The _field name_ and the file's name may be used in the ([Assembly instructions](https://transloadit.com/docs/topics/assembly-instructions/)) (`params`.`steps`) to refer to the particular file. See example below. + - `'fieldName': '/path/to/file'` + - more files... +- `uploads` - An object (key-value pairs) containing one or more files to upload and use in your Assembly. The _key_ is the _file name_ and the _value_ is the _content_ of the file to be uploaded. _Value_ can be one of many types: + - `'fieldName': (Readable | Buffer | TypedArray | ArrayBuffer | string | Iterable | AsyncIterable | Promise)`. + - more uploads... +- `waitForCompletion` - A boolean (default is `false`) to indicate whether you want to wait for the Assembly to finish with all encoding results present before the promise is fulfilled. If `waitForCompletion` is `true`, this SDK will poll for status updates and fulfill the promise when all encoding work is done. +- `timeout` - Number of milliseconds to wait before aborting (default `86400000`: 24 hours). +- `onUploadProgress` - An optional function that will be periodically called with the file upload progress, which is an object containing: + - `uploadedBytes` - Number of bytes uploaded so far. + - `totalBytes` - Total number of bytes to upload or `undefined` if unknown (Streams). 
+- `onAssemblyProgress` - Once the Assembly has started processing this will be periodically called with the _Assembly Execution Status_ (result of `getAssembly`) **only if `waitForCompletion` is `true`**. +- `chunkSize` - (for uploads) a number indicating the maximum size of a tus `PATCH` request body in bytes. Defaults to `Infinity` for file uploads and 50MB for streams of unknown length. See [tus-js-client](https://github.com/tus/tus-js-client/blob/master/docs/api.md#chunksize). +- `uploadConcurrency` - Maximum number of concurrent tus file uploads to occur at any given time (default `10`). +- `uploadBehavior` - Controls how uploads are handled: + - `await` (default) waits for all uploads to finish. + - `background` starts uploads and returns once upload URLs are created. + - `none` returns upload URLs without uploading any bytes. + - When `uploadBehavior` is not `await`, `waitForCompletion` is ignored. + +**NOTE**: Make sure the key in `files` and `uploads` is not one of `signature`, `params` or `max_size`. + +When `uploadBehavior` is `background` or `none`, the resolved Assembly object includes +`upload_urls` with a map of field names to tus upload URLs. + +Example code showing all options: + +```js +await transloadit.createAssembly({ + files: { + file1: '/path/to/file.jpg' + // ... + }, + uploads: { + 'file2.bin': Buffer.from([0, 0, 7]), // A buffer + 'file3.txt': 'file contents', // A string + 'file4.jpg': process.stdin // A stream + // ... + }, + params: { + steps: { ... }, + template_id: 'MY_TEMPLATE_ID', + fields: { + field1: 'Field value', + // ... 
+ }, + notify_url: 'https://example.com/notify-url', + }, + waitForCompletion: true, + timeout: 60000, + onUploadProgress, + onAssemblyProgress, +}) +``` + +Example `onUploadProgress` and `onAssemblyProgress` handlers: + +```javascript +function onUploadProgress({ uploadedBytes, totalBytes }) { + // NOTE: totalBytes may be undefined + console.log(`♻️ Upload progress polled: ${uploadedBytes} of ${totalBytes} bytes uploaded.`) +} +function onAssemblyProgress(assembly) { + console.log( + `♻️ Assembly progress polled: ${assembly.error ? assembly.error : assembly.ok} ${ + assembly.assembly_id + } ... ` + ) +} +``` + +**Tip:** `createAssembly` returns a `Promise` with an extra property `assemblyId`. This can be used to retrieve the Assembly ID before the Assembly has even been created. Useful for debugging by logging this ID when the request starts, for example: + +```js +const promise = transloadit.createAssembly(options) +console.log('Creating', promise.assemblyId) +const status = await promise +``` + +See also: + +- [API documentation](https://transloadit.com/docs/api/assemblies-post/) +- Error codes and retry logic below + +#### async lintAssemblyInstructions(options) + +Lint Assembly Instructions locally using the same linter as the API. +If you provide a `templateId`, the template content is fetched and merged with your instructions +before linting (matching the API's runtime merge behavior). If the template sets +`allow_steps_override=false`, providing steps will throw `TEMPLATE_DENIES_STEPS_OVERRIDE`. + +The `options` object accepts: + +- `assemblyInstructions` - Assembly Instructions as a JSON string, a full instructions object, or a steps-only object. + If no `steps` property is present, the object is treated as steps. +- `templateId` - Optional template ID to merge before linting. +- `fatal` - `'error' | 'warning'` (default: `'error'`). When set to `'warning'`, warnings are treated as fatal. +- `fix` - Apply auto-fixes where possible. 
If `true`, the result includes `fixedInstructions`. + +The method returns: + +- `success` - `true` when no fatal issues are found. +- `issues` - Array of lint issues (each includes `code`, `type`, `row`, `column`, and `desc`). +- `fixedInstructions` - The fixed JSON string when `fix` is `true` (steps-only inputs return steps-only JSON). + +Example: + +```js +const result = await transloadit.lintAssemblyInstructions({ + assemblyInstructions: { + resize: { robot: '/image/resize', use: ':original', width: 100, height: 100 }, + }, + fatal: 'warning', +}) + +if (!result.success) { + console.log(result.issues) +} +``` + +#### async listAssemblies(params) + +Retrieve Assemblies according to the given `params`. + +Valid params can be `page`, `pagesize`, `type`, `fromdate`, `todate` and `keywords`. Please consult the [API documentation](https://transloadit.com/docs/api/assemblies-get/) for details. + +The method returns an object containing these properties: + +- `items`: An `Array` of up to `pagesize` Assemblies +- `count`: Total number of Assemblies + +#### streamAssemblies(params) + +Creates an `objectMode` `Readable` stream that automates handling of `listAssemblies` pagination. It accepts the same `params` as `listAssemblies`. + +This can be used to iterate through Assemblies: + +```javascript +const assemblyStream = transloadit.streamAssemblies({ fromdate: '2016-08-19 01:15:00 UTC' }) + +assemblyStream.on('readable', function () { + const assembly = assemblyStream.read() + if (assembly == null) console.log('end of stream') + + console.log(assembly.id) +}) +``` + +Results can also be piped. 
Here's an example using +[through2](https://github.com/rvagg/through2): + +```javascript +const assemblyStream = transloadit.streamAssemblies({ fromdate: '2016-08-19 01:15:00 UTC' }) + +assemblyStream + .pipe( + through.obj(function (chunk, enc, callback) { + this.push(chunk.id + '\n') + callback() + }) + ) + .pipe(fs.createWriteStream('assemblies.txt')) +``` + +#### async getAssembly(assemblyId) + +Retrieves the JSON status of the Assembly identified by the given `assemblyId`. See [API documentation](https://transloadit.com/docs/api/assemblies-assembly-id-get/). + +#### async cancelAssembly(assemblyId) + +Removes the Assembly identified by the given `assemblyId` from the memory of the Transloadit machines, ultimately cancelling it. This does not delete the Assembly from the database - you can still access it on `https://transloadit.com/assemblies/{assembly_id}` in your Transloadit account. This also does not delete any files associated with the Assembly from the Transloadit servers. See [API documentation](https://transloadit.com/docs/api/assemblies-assembly-id-delete/). + +#### async replayAssembly(assemblyId, params) + +Replays the Assembly identified by the given `assemblyId` (required argument). Optionally you can also provide a `notify_url` key inside `params` if you want to change the notification target. See [API documentation](https://transloadit.com/docs/api/assemblies-assembly-id-replay-post/) for more info about `params`. + +The response from the `replayAssembly` is minimal and does not contain much information about the replayed assembly. 
Please call `getAssembly` or `awaitAssemblyCompletion` after replay to get more information: + +```js +const replayAssemblyResponse = await transloadit.replayAssembly(failedAssemblyId) + +const assembly = await transloadit.getAssembly(replayAssemblyResponse.assembly_id) +// Or +const completedAssembly = await transloadit.awaitAssemblyCompletion( + replayAssemblyResponse.assembly_id +) +``` + +#### async awaitAssemblyCompletion(assemblyId, opts) + +This function will continuously poll the specified Assembly `assemblyId` and resolve when it is done uploading and executing (until `result.ok` is no longer `ASSEMBLY_UPLOADING`, `ASSEMBLY_EXECUTING` or `ASSEMBLY_REPLAYING`). It resolves with the same value as `getAssembly`. + +`opts` is an object with the keys: + +- `onAssemblyProgress` - A progress function called on each poll. See `createAssembly` +- `timeout` - How many milliseconds until polling times out (default: no timeout) +- `interval` - Poll interval in milliseconds (default `1000`) +- `signal` - An `AbortSignal` to cancel polling. When aborted, the promise rejects with an `AbortError`. +- `onPoll` - A callback invoked at the start of each poll iteration. Return `false` to stop polling early and resolve with the last known status. Useful for implementing custom cancellation logic (e.g., superseding assemblies in watch mode). + +#### getLastUsedAssemblyUrl() + +Returns the internal url that was used for the last call to `createAssembly`. This is meant to be used for debugging purposes. + +### Assembly notifications + +#### async replayAssemblyNotification(assemblyId, params) + +Replays the notification for the Assembly identified by the given `assemblyId` (required argument). Optionally you can also provide a `notify_url` key inside `params` if you want to change the notification target. See [API documentation](https://transloadit.com/docs/api/assembly-notifications-assembly-id-replay-post/) for more info about `params`. 
+ +### Templates + +Templates are Steps that can be reused. [See example template code](examples/template_api.ts). + +#### async createTemplate(params) + +Creates a template with the provided params. The required `params` keys are: + +- `name` - The template name +- `template` - The template JSON object containing its `steps` + +See also [API documentation](https://transloadit.com/docs/api/templates-post/). + +```js +const template = { + steps: { + encode: { + use: ':original', + robot: '/video/encode', + preset: 'ipad-high', + }, + thumbnail: { + use: 'encode', + robot: '/video/thumbnails', + }, + }, +} + +const result = await transloadit.createTemplate({ name: 'my-template-name', template }) +console.log('✅ Template created with template_id', result.id) +``` + +#### async editTemplate(templateId, params) + +Updates the template represented by the given `templateId` with the new value. The `params` works just like the one from the `createTemplate` call. See [API documentation](https://transloadit.com/docs/api/templates-template-id-put/). + +#### async getTemplate(templateId) + +Retrieves the name and the template JSON for the template represented by the given `templateId`. See [API documentation](https://transloadit.com/docs/api/templates-template-id-get/). + +#### async deleteTemplate(templateId) + +Deletes the template represented by the given `templateId`. See [API documentation](https://transloadit.com/docs/api/templates-template-id-delete/). + +#### async listTemplates(params) + +Retrieve all your templates. See [API documentation](https://transloadit.com/docs/api/templates-get/) for more info about `params`. + +The method returns an object containing these properties: + +- `items`: An `Array` of up to `pagesize` templates +- `count`: Total number of templates + +#### streamTemplates(params) + +Creates an `objectMode` `Readable` stream that automates handling of `listTemplates` pagination. Similar to `streamAssemblies`. 
+ +### Template Credentials + +Template Credentials allow you to store third-party credentials (e.g., AWS S3, Google Cloud Storage, FTP) securely on Transloadit for use in your Assembly Instructions. + +#### async createTemplateCredential(params) + +Creates a new Template Credential. The `params` object should contain the credential configuration. See [API documentation](https://transloadit.com/docs/api/template-credentials-post/). + +#### async editTemplateCredential(credentialId, params) + +Updates an existing Template Credential identified by `credentialId`. See [API documentation](https://transloadit.com/docs/api/template-credentials-credential-id-put/). + +#### async deleteTemplateCredential(credentialId) + +Deletes the Template Credential identified by `credentialId`. See [API documentation](https://transloadit.com/docs/api/template-credentials-credential-id-delete/). + +#### async getTemplateCredential(credentialId) + +Retrieves the Template Credential identified by `credentialId`. See [API documentation](https://transloadit.com/docs/api/template-credentials-credential-id-get/). + +#### async listTemplateCredentials(params) + +Lists all Template Credentials. See [API documentation](https://transloadit.com/docs/api/template-credentials-get/). + +#### streamTemplateCredentials(params) + +Creates an `objectMode` `Readable` stream that automates handling of `listTemplateCredentials` pagination. Similar to `streamAssemblies`. + +### Other + +#### setDefaultTimeout(timeout) + +Same as `constructor` `timeout` option: Set the default timeout (in milliseconds) for all requests (except `createAssembly`) + +#### async getBill(date) + +Retrieves the billing data for a given `date` string with format `YYYY-MM`. See [API documentation](https://transloadit.com/docs/api/bill-date-get/). + +#### calcSignature(params) + +Calculates a signature for the given `params` JSON object. 
If the `params` object does not include `authKey` or `expires` keys (and their values) in the `auth` sub-key, then they are set automatically. + +This function returns an object with the key `signature` (containing the calculated signature string) and a key `params`, which contains the stringified version of the passed `params` object (including the set expires and authKey keys). + +See [Signature Generation](#signature-generation) in the CLI section for command-line usage. + +#### getSignedSmartCDNUrl(params) + +Constructs a signed Smart CDN URL, as defined in the [API documentation](https://transloadit.com/docs/topics/signature-authentication/#smart-cdn). `params` must be an object with the following properties: + +- `workspace` - Workspace slug (required) +- `template` - Template slug or template ID (required) +- `input` - Input value that is provided as `${fields.input}` in the template (required) +- `urlParams` - Object with additional parameters for the URL query string (optional) +- `expiresAt` - Expiration timestamp of the signature in milliseconds since UNIX epoch. Defaults to 1 hour from now. (optional) + +Example: + +```js +const client = new Transloadit({ authKey: 'foo_key', authSecret: 'foo_secret' }) +const url = client.getSignedSmartCDNUrl({ + workspace: 'foo_workspace', + template: 'foo_template', + input: 'foo_input', + urlParams: { + foo: 'bar', + }, +}) + +// url is: +// https://foo_workspace.tlcdn.com/foo_template/foo_input?auth_key=foo_key&exp=1714525200000&foo=bar&sig=sha256:9548915ec70a5f0d05de9497289e792201ceec19a526fe315f4f4fd2e7e377ac +``` + +### Errors + +Any errors originating from Node.js will be passed on and we use [GOT](https://github.com/sindresorhus/got) v11 for HTTP requests. 
[Errors from `got`](https://github.com/sindresorhus/got/tree/v11.8.6?tab=readme-ov-file#errors) will also be passed on, _except_ the `got.HTTPError` which will be replaced with a `transloadit.ApiError`, which will have its `cause` property set to the instance of the original `got.HTTPError`. `transloadit.ApiError` has these properties: + +- `code` (`string`) - [The Transloadit API error code](https://transloadit.com/docs/api/response-codes/#error-codes). +- `rawMessage` (`string`) - A textual representation of the Transloadit API error. +- `reason` (`string`) - Additional information about the Transloadit API error. +- `assemblyId` (`string`) - If the request is related to an assembly, this will be the ID of the assembly. +- `assemblySslUrl` (`string`) - If the request is related to an assembly, this will be the SSL URL to the assembly. + +To identify errors you can either check its props or use `instanceof`, e.g.: + +```js +try { + await transloadit.createAssembly(options) +} catch (err) { + if (err instanceof got.TimeoutError) { + return console.error('The request timed out', err) + } + if (err.code === 'ENOENT') { + return console.error('Cannot open file', err) + } + if (err instanceof ApiError && err.code === 'ASSEMBLY_INVALID_STEPS') { + return console.error('Invalid Assembly Steps', err) + } +} +``` + +**Note:** Assemblies that have an error status (`assembly.error`) will only result in an error being thrown from `createAssembly` and `replayAssembly`. For other Assembly methods, no errors will be thrown, but any error can be found in the response's `error` property (also `ApiError.code`). 
+ +- [More information on Transloadit errors (`ApiError.code`)](https://transloadit.com/docs/api/response-codes/#error-codes) +- [More information on request errors](https://github.com/sindresorhus/got#errors) + +### Rate limiting & auto retry + +There are three kinds of retries: + +#### Retry on rate limiting (`maxRetries`, default `5`) + +All functions of the client automatically obey all rate limiting imposed by Transloadit (e.g. `RATE_LIMIT_REACHED`), so there is no need to write your own wrapper scripts to handle rate limits. The SDK will by default retry requests **5 times** with auto back-off (See `maxRetries` constructor option). + +#### GOT HTTP retries (`gotRetry`, default `{ limit: 0 }`) + +Because we use [got](https://github.com/sindresorhus/got) under the hood, you can pass a `gotRetry` constructor option which is passed on to `got`. This offers great flexibility for handling retries on network errors and HTTP status codes with auto back-off. See [`got` `retry` object documentation](https://github.com/sindresorhus/got/blob/main/documentation/7-retry.md). + +**Note that the above `maxRetries` option does not affect the `gotRetry` logic.** + +#### Validate API responses (`validateResponses`, default `false`) + +As we have ported the JavaScript SDK to TypeScript in v4, we are now also validating API responses using `zod` schemas. Having schema validation enabled (`true`), guarantees that the data returned by the SDK adheres to the TypeScript types of this SDK. However we are still working on improving the schemas and they are not yet 100% complete. This means that if you hit a bug in the schemas, a `zod` schema validation error will be thrown. If you encounter such an error, please report it and we will fix it as soon as possible. If you set this option to `false`, schema validation will be disabled, and you won't get any such errors, however the TypeScript types will not protect you should such a bug be encountered. 
+ +#### Custom retry logic + +If you want to retry on other errors, please see the [retry example code](examples/retry.ts). + +- https://transloadit.com/docs/api/rate-limiting/ +- https://transloadit.com/blog/2012/04/introducing-rate-limiting/ + +## Debugging + +This project uses [debug](https://github.com/visionmedia/debug) so you can run node with the `DEBUG=transloadit` environment variable to enable verbose logging. Example: + +```bash +DEBUG=transloadit* node examples/template_api.ts +``` + +## Maintainers + +- [Mikael Finstad](https://github.com/mifi) + +### Changelog + +See [Releases](https://github.com/transloadit/node-sdk/releases) + +## Attribution + +Thanks to [Ian Hansen](https://github.com/supershabam) for donating the `transloadit` npm name. You can still access his code under [`v0.0.0`](https://www.npmjs.com/package/transloadit/v/0.0.0). + +## License + +[MIT](LICENSE) © [Transloadit](https://transloadit.com) + +## Development + +See [CONTRIBUTING](./CONTRIBUTING.md). + + + + diff --git a/packages/transloadit/package.json b/packages/transloadit/package.json index 63814af2..5fdcc4f0 100644 --- a/packages/transloadit/package.json +++ b/packages/transloadit/package.json @@ -1,6 +1,6 @@ { "name": "transloadit", - "version": "4.7.4", + "version": "4.7.5", "description": "Node.js SDK for Transloadit", "homepage": "https://github.com/transloadit/node-sdk/tree/main/packages/node", "bugs": { @@ -36,6 +36,7 @@ "@aws-sdk/s3-request-presigner": "^3.891.0", "@transloadit/sev-logger": "^0.1.9", "@transloadit/utils": "^4.3.0", + "cacheable-lookup": "^7.0.0", "clipanion": "^4.0.0-rc.4", "debug": "^4.4.3", "dotenv": "^17.2.3", @@ -70,19 +71,20 @@ "src": "./src" }, "scripts": { - "check": "yarn lint:ts && yarn fix && yarn test:unit", + "check": "yarn sync:intent-docs && yarn lint:ts && yarn test:unit", + "sync:intent-docs": "node src/cli/generateIntentDocs.ts", "fix:js": "biome check --write .", "lint:ts": "yarn --cwd ../..
tsc:node", "fix:js:unsafe": "biome check --write . --unsafe", "lint:js": "biome check .", - "lint": "npm-run-all --parallel 'lint:js'", - "fix": "npm-run-all --serial 'fix:js'", + "lint": "yarn lint:js", + "fix": "yarn fix:js", "lint:deps": "knip --dependencies --no-progress", "fix:deps": "knip --dependencies --no-progress --fix", "prepack": "node ../../scripts/prepare-transloadit.ts", - "test:unit": "yarn --cwd ../.. tsc:utils && ../../node_modules/.bin/vitest run --coverage ./test/unit", - "test:e2e": "yarn --cwd ../.. tsc:utils && ../../node_modules/.bin/vitest run ./test/e2e", - "test": "yarn --cwd ../.. tsc:utils && ../../node_modules/.bin/vitest run --coverage" + "test:unit": "yarn --cwd ../.. tsc:utils && ../../node_modules/.bin/vitest run --coverage --passWithNoTests ./test/unit", + "test:e2e": "yarn --cwd ../.. tsc:utils && ../../node_modules/.bin/vitest run --passWithNoTests ./test/e2e", + "test": "yarn --cwd ../.. tsc:utils && ../../node_modules/.bin/vitest run --coverage --passWithNoTests" }, "license": "MIT", "main": "./dist/Transloadit.js", diff --git a/scripts/fingerprint-pack.ts b/scripts/fingerprint-pack.ts index beef9e2c..cad2f1a0 100644 --- a/scripts/fingerprint-pack.ts +++ b/scripts/fingerprint-pack.ts @@ -3,7 +3,7 @@ import { createHash } from 'node:crypto' import { createReadStream } from 'node:fs' import { mkdir, mkdtemp, readFile, rm, stat, writeFile } from 'node:fs/promises' import { tmpdir } from 'node:os' -import { resolve } from 'node:path' +import { relative, resolve, sep } from 'node:path' import { promisify } from 'node:util' const execFileAsync = promisify(execFile) @@ -112,6 +112,11 @@ const runWithConcurrency = async ( return results } +const normalizePackageDir = (cwd: string): string => { + const normalized = relative(process.cwd(), cwd).split(sep).join('/') + return normalized === '' ? '.' 
: normalized +} + const main = async (): Promise => { const { target, out, keep, ignoreScripts, quiet } = parseArgs() const cwd = resolve(process.cwd(), target) @@ -153,7 +158,7 @@ const main = async (): Promise => { const packageJson = JSON.parse(packageJsonRaw) const summary = { - packageDir: cwd, + packageDir: normalizePackageDir(cwd), tarball: { filename: info.filename, sizeBytes: tarballStat.size, diff --git a/scripts/prepare-transloadit.ts b/scripts/prepare-transloadit.ts index d0f298c1..64ecb8c7 100644 --- a/scripts/prepare-transloadit.ts +++ b/scripts/prepare-transloadit.ts @@ -28,13 +28,62 @@ const formatPackageJson = (data: Record): string => { type PackageJson = Record & { scripts?: Record } +function replaceRequired( + value: string, + searchValue: string, + replaceValue: string, + label: string, +): string { + if (!value.includes(searchValue)) { + throw new Error(`Expected ${label} to include ${JSON.stringify(searchValue)}`) + } + + return value.replace(searchValue, replaceValue) +} + +function deriveLegacyScripts(nodeScripts: Record): Record { + const scripts = { ...nodeScripts } + if (scripts.check != null) { + scripts.check = replaceRequired(scripts.check, ' && yarn fix', '', 'scripts.check') + } + + if (scripts['test:unit'] != null) { + scripts['test:unit'] = replaceRequired( + scripts['test:unit'], + 'vitest run --coverage ./test/unit', + 'vitest run --coverage --passWithNoTests ./test/unit', + 'scripts.test:unit', + ) + } + + if (scripts['test:e2e'] != null) { + scripts['test:e2e'] = replaceRequired( + scripts['test:e2e'], + 'vitest run ./test/e2e', + 'vitest run --passWithNoTests ./test/e2e', + 'scripts.test:e2e', + ) + } + + if (scripts.test != null) { + scripts.test = replaceRequired( + scripts.test, + 'vitest run --coverage', + 'vitest run --coverage --passWithNoTests', + 'scripts.test', + ) + } + + scripts.prepack = 'node ../../scripts/prepare-transloadit.ts' + return scripts +} + const writeLegacyPackageJson = async (): Promise => { const 
nodePackageJson = await readJson(resolve(nodePackage, 'package.json')) const legacyExisting = await readJson(resolve(legacyPackage, 'package.json')).catch( () => null, ) - const scripts = { ...(nodePackageJson.scripts ?? {}) } - scripts.prepack = 'node ../../scripts/prepare-transloadit.ts' + const scripts = deriveLegacyScripts(nodePackageJson.scripts ?? {}) const legacyPackageJson: PackageJson = { ...nodePackageJson, name: 'transloadit', diff --git a/yarn.lock b/yarn.lock index 9ebd10ac..2b8fa410 100644 --- a/yarn.lock +++ b/yarn.lock @@ -2552,6 +2552,7 @@ __metadata: "@types/recursive-readdir": "npm:^2.2.4" "@types/temp": "npm:^0.9.4" badge-maker: "npm:^5.0.2" + cacheable-lookup: "npm:^7.0.0" clipanion: "npm:^4.0.0-rc.4" debug: "npm:^4.4.3" dotenv: "npm:^17.2.3" @@ -7664,6 +7665,7 @@ __metadata: "@types/minimist": "npm:^1.2.5" "@types/node": "npm:^24.10.3" "@types/recursive-readdir": "npm:^2.2.4" + cacheable-lookup: "npm:^7.0.0" clipanion: "npm:^4.0.0-rc.4" debug: "npm:^4.4.3" dotenv: "npm:^17.2.3"