Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
42 changes: 35 additions & 7 deletions .github/workflows/ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -36,14 +36,42 @@ jobs:
GH_TOKEN: ${{ github.token }}
run: |
set -euo pipefail
# Use a rust-release version that includes all native binaries.
CODEX_VERSION=0.74.0
# Use the newest successful rust-release workflow that still has native artifacts.
OUTPUT_DIR="${RUNNER_TEMP}"
python3 ./scripts/stage_npm_packages.py \
--release-version "$CODEX_VERSION" \
--package codex \
--output-dir "$OUTPUT_DIR"
PACK_OUTPUT="${OUTPUT_DIR}/codex-npm-${CODEX_VERSION}.tgz"
set +e
WORKFLOW_URLS=$(gh run list \
--workflow .github/workflows/rust-release.yml \
--json status,conclusion,headBranch,url \
--jq '.[] | select(.status=="completed" and .conclusion=="success" and (.headBranch | startswith("rust-v")) and (.url | contains("/actions/runs/"))) | .url')
set -e

if [ -z "$WORKFLOW_URLS" ]; then
echo "Unable to resolve a completed successful rust-release workflow."
exit 1
fi

for WORKFLOW_URL in $WORKFLOW_URLS; do
WORKFLOW_ID="${WORKFLOW_URL##*/}"
CODEX_VERSION="$(gh run view "$WORKFLOW_ID" --json headBranch -q '.headBranch | sub("^rust-v"; "")')"

echo "Attempting npm staging from ${WORKFLOW_URL} (version ${CODEX_VERSION})."
if python3 ./scripts/stage_npm_packages.py \
--release-version "$CODEX_VERSION" \
--workflow-url "$WORKFLOW_URL" \
--package codex \
--output-dir "$OUTPUT_DIR"; then
PACK_OUTPUT="${OUTPUT_DIR}/codex-npm-${CODEX_VERSION}.tgz"
break
fi
echo "Npm staging failed for ${WORKFLOW_URL}; trying next rust-release run."
done

if [ -z "${PACK_OUTPUT:-}" ]; then
echo "::error::No eligible rust-release run could produce a stageable npm package."
exit 1
fi

echo "Staged package at ${PACK_OUTPUT}"
echo "pack_output=$PACK_OUTPUT" >> "$GITHUB_OUTPUT"

- name: Upload staged npm package artifact
Expand Down
27 changes: 13 additions & 14 deletions .github/workflows/rust-ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -86,8 +86,7 @@ jobs:
- uses: dtolnay/rust-toolchain@1.93.0
- uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2
with:
tool: cargo-shear
version: 1.5.1
tool: cargo-shear@1.5.1
- name: cargo shear
run: cargo shear

Expand Down Expand Up @@ -141,8 +140,10 @@ jobs:
run:
working-directory: codex-rs
env:
# Speed up repeated builds across CI runs by caching compiled objects (non-Windows).
USE_SCCACHE: ${{ startsWith(matrix.runner, 'windows') && 'false' || 'true' }}
# Speed up repeated builds across CI runs by caching compiled objects, except on
# arm64 macOS runners cross-targeting x86_64 where ring/cc-rs can produce
# mixed-architecture archives under sccache.
USE_SCCACHE: ${{ (startsWith(matrix.runner, 'windows') || (matrix.runner == 'macos-15-xlarge' && matrix.target == 'x86_64-apple-darwin')) && 'false' || 'true' }}
CARGO_INCREMENTAL: "0"
SCCACHE_CACHE_SIZE: 10G
# In rust-ci, representative release-profile checks use thin LTO for faster feedback.
Expand Down Expand Up @@ -291,8 +292,7 @@ jobs:
if: ${{ env.USE_SCCACHE == 'true' }}
uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2
with:
tool: sccache
version: 0.7.5
tool: sccache@0.7.5

- name: Configure sccache backend
if: ${{ env.USE_SCCACHE == 'true' }}
Expand Down Expand Up @@ -421,8 +421,7 @@ jobs:
if: ${{ matrix.profile == 'release' }}
uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2
with:
tool: cargo-chef
version: 0.1.71
tool: cargo-chef@0.1.71

- name: Pre-warm dependency cache (cargo-chef)
if: ${{ matrix.profile == 'release' }}
Expand Down Expand Up @@ -506,8 +505,10 @@ jobs:
run:
working-directory: codex-rs
env:
# Speed up repeated builds across CI runs by caching compiled objects (non-Windows).
USE_SCCACHE: ${{ startsWith(matrix.runner, 'windows') && 'false' || 'true' }}
# Speed up repeated builds across CI runs by caching compiled objects, except on
# arm64 macOS runners cross-targeting x86_64 where ring/cc-rs can produce
# mixed-architecture archives under sccache.
USE_SCCACHE: ${{ (startsWith(matrix.runner, 'windows') || (matrix.runner == 'macos-15-xlarge' && matrix.target == 'x86_64-apple-darwin')) && 'false' || 'true' }}
CARGO_INCREMENTAL: "0"
SCCACHE_CACHE_SIZE: 10G

Expand Down Expand Up @@ -593,8 +594,7 @@ jobs:
if: ${{ env.USE_SCCACHE == 'true' }}
uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2
with:
tool: sccache
version: 0.7.5
tool: sccache@0.7.5

- name: Configure sccache backend
if: ${{ env.USE_SCCACHE == 'true' }}
Expand Down Expand Up @@ -628,8 +628,7 @@ jobs:

- uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2
with:
tool: nextest
version: 0.9.103
tool: nextest@0.9.103

- name: Enable unprivileged user namespaces (Linux)
if: runner.os == 'Linux'
Expand Down
1 change: 1 addition & 0 deletions codex-rs/Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 1 addition & 0 deletions codex-rs/exec-server/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ workspace = true
base64 = { workspace = true }
clap = { workspace = true, features = ["derive"] }
codex-app-server-protocol = { workspace = true }
codex-environment = { workspace = true }
codex-utils-pty = { workspace = true }
futures = { workspace = true }
serde = { workspace = true, features = ["derive"] }
Expand Down
8 changes: 4 additions & 4 deletions codex-rs/exec-server/src/bin/codex-exec-server.rs
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
use clap::Parser;
use codex_exec_server::ExecServerTransport;
use codex_exec_server::DEFAULT_LISTEN_URL;

#[derive(Debug, Parser)]
struct ExecServerArgs {
Expand All @@ -8,15 +8,15 @@ struct ExecServerArgs {
#[arg(
long = "listen",
value_name = "URL",
default_value = ExecServerTransport::DEFAULT_LISTEN_URL
default_value = DEFAULT_LISTEN_URL
)]
listen: ExecServerTransport,
listen: String,
}

#[tokio::main]
async fn main() {
let args = ExecServerArgs::parse();
if let Err(err) = codex_exec_server::run_main_with_transport(args.listen).await {
if let Err(err) = codex_exec_server::run_main_with_transport(&args.listen).await {
eprintln!("{err}");
std::process::exit(1);
}
Expand Down
144 changes: 144 additions & 0 deletions codex-rs/exec-server/src/client.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,20 @@
use std::sync::Arc;
use std::time::Duration;

use codex_app_server_protocol::FsCopyParams;
use codex_app_server_protocol::FsCopyResponse;
use codex_app_server_protocol::FsCreateDirectoryParams;
use codex_app_server_protocol::FsCreateDirectoryResponse;
use codex_app_server_protocol::FsGetMetadataParams;
use codex_app_server_protocol::FsGetMetadataResponse;
use codex_app_server_protocol::FsReadDirectoryParams;
use codex_app_server_protocol::FsReadDirectoryResponse;
use codex_app_server_protocol::FsReadFileParams;
use codex_app_server_protocol::FsReadFileResponse;
use codex_app_server_protocol::FsRemoveParams;
use codex_app_server_protocol::FsRemoveResponse;
use codex_app_server_protocol::FsWriteFileParams;
use codex_app_server_protocol::FsWriteFileResponse;
use codex_app_server_protocol::JSONRPCNotification;
use serde_json::Value;
use tokio::io::AsyncRead;
Expand All @@ -26,6 +40,13 @@ use crate::protocol::ExecExitedNotification;
use crate::protocol::ExecOutputDeltaNotification;
use crate::protocol::ExecParams;
use crate::protocol::ExecResponse;
use crate::protocol::FS_COPY_METHOD;
use crate::protocol::FS_CREATE_DIRECTORY_METHOD;
use crate::protocol::FS_GET_METADATA_METHOD;
use crate::protocol::FS_READ_DIRECTORY_METHOD;
use crate::protocol::FS_READ_FILE_METHOD;
use crate::protocol::FS_REMOVE_METHOD;
use crate::protocol::FS_WRITE_FILE_METHOD;
use crate::protocol::INITIALIZE_METHOD;
use crate::protocol::INITIALIZED_METHOD;
use crate::protocol::InitializeParams;
Expand Down Expand Up @@ -326,6 +347,129 @@ impl ExecServerClient {
.map_err(Into::into)
}

/// Reads a file through the backend, preferring the in-process local backend
/// and falling back to a `fs/readFile` JSON-RPC call on the remote backend.
pub async fn fs_read_file(
    &self,
    params: FsReadFileParams,
) -> Result<FsReadFileResponse, ExecServerError> {
    match self.inner.backend.as_local() {
        Some(local) => local.fs_read_file(params).await,
        None => {
            // Neither backend available is a protocol-level inconsistency.
            let remote = self.inner.backend.as_remote().ok_or_else(|| {
                ExecServerError::Protocol("remote backend missing during fs/readFile".to_string())
            })?;
            remote
                .call(FS_READ_FILE_METHOD, &params)
                .await
                .map_err(Into::into)
        }
    }
}

/// Writes a file through the backend, preferring the in-process local backend
/// and falling back to a `fs/writeFile` JSON-RPC call on the remote backend.
pub async fn fs_write_file(
    &self,
    params: FsWriteFileParams,
) -> Result<FsWriteFileResponse, ExecServerError> {
    match self.inner.backend.as_local() {
        Some(local) => local.fs_write_file(params).await,
        None => {
            // Neither backend available is a protocol-level inconsistency.
            let remote = self.inner.backend.as_remote().ok_or_else(|| {
                ExecServerError::Protocol("remote backend missing during fs/writeFile".to_string())
            })?;
            remote
                .call(FS_WRITE_FILE_METHOD, &params)
                .await
                .map_err(Into::into)
        }
    }
}

/// Creates a directory through the backend, preferring the in-process local
/// backend and falling back to a `fs/createDirectory` JSON-RPC call.
pub async fn fs_create_directory(
    &self,
    params: FsCreateDirectoryParams,
) -> Result<FsCreateDirectoryResponse, ExecServerError> {
    match self.inner.backend.as_local() {
        Some(local) => local.fs_create_directory(params).await,
        None => {
            // Neither backend available is a protocol-level inconsistency.
            let remote = self.inner.backend.as_remote().ok_or_else(|| {
                ExecServerError::Protocol(
                    "remote backend missing during fs/createDirectory".to_string(),
                )
            })?;
            remote
                .call(FS_CREATE_DIRECTORY_METHOD, &params)
                .await
                .map_err(Into::into)
        }
    }
}

/// Fetches filesystem metadata through the backend, preferring the in-process
/// local backend and falling back to a `fs/getMetadata` JSON-RPC call.
pub async fn fs_get_metadata(
    &self,
    params: FsGetMetadataParams,
) -> Result<FsGetMetadataResponse, ExecServerError> {
    match self.inner.backend.as_local() {
        Some(local) => local.fs_get_metadata(params).await,
        None => {
            // Neither backend available is a protocol-level inconsistency.
            let remote = self.inner.backend.as_remote().ok_or_else(|| {
                ExecServerError::Protocol("remote backend missing during fs/getMetadata".to_string())
            })?;
            remote
                .call(FS_GET_METADATA_METHOD, &params)
                .await
                .map_err(Into::into)
        }
    }
}

/// Lists a directory through the backend, preferring the in-process local
/// backend and falling back to a `fs/readDirectory` JSON-RPC call.
pub async fn fs_read_directory(
    &self,
    params: FsReadDirectoryParams,
) -> Result<FsReadDirectoryResponse, ExecServerError> {
    match self.inner.backend.as_local() {
        Some(local) => local.fs_read_directory(params).await,
        None => {
            // Neither backend available is a protocol-level inconsistency.
            let remote = self.inner.backend.as_remote().ok_or_else(|| {
                ExecServerError::Protocol(
                    "remote backend missing during fs/readDirectory".to_string(),
                )
            })?;
            remote
                .call(FS_READ_DIRECTORY_METHOD, &params)
                .await
                .map_err(Into::into)
        }
    }
}

/// Removes a filesystem entry through the backend, preferring the in-process
/// local backend and falling back to a `fs/remove` JSON-RPC call.
pub async fn fs_remove(
    &self,
    params: FsRemoveParams,
) -> Result<FsRemoveResponse, ExecServerError> {
    match self.inner.backend.as_local() {
        Some(local) => local.fs_remove(params).await,
        None => {
            // Neither backend available is a protocol-level inconsistency.
            let remote = self.inner.backend.as_remote().ok_or_else(|| {
                ExecServerError::Protocol("remote backend missing during fs/remove".to_string())
            })?;
            remote
                .call(FS_REMOVE_METHOD, &params)
                .await
                .map_err(Into::into)
        }
    }
}

/// Copies a filesystem entry through the backend, preferring the in-process
/// local backend and falling back to a `fs/copy` JSON-RPC call.
pub async fn fs_copy(&self, params: FsCopyParams) -> Result<FsCopyResponse, ExecServerError> {
    match self.inner.backend.as_local() {
        Some(local) => local.fs_copy(params).await,
        None => {
            // Neither backend available is a protocol-level inconsistency.
            let remote = self.inner.backend.as_remote().ok_or_else(|| {
                ExecServerError::Protocol("remote backend missing during fs/copy".to_string())
            })?;
            remote
                .call(FS_COPY_METHOD, &params)
                .await
                .map_err(Into::into)
        }
    }
}

async fn connect(
connection: JsonRpcConnection,
options: ExecServerClientConnectOptions,
Expand Down
Loading
Loading