Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
26 changes: 21 additions & 5 deletions .github/workflows/release.yml
Original file line number Diff line number Diff line change
Expand Up @@ -18,10 +18,12 @@ jobs:
arch: x86_64
target: x86_64-unknown-linux-gnu
ext: ""
cargo_binstall_archive: true
- os: linux
arch: aarch64
target: aarch64-unknown-linux-gnu
ext: ""
cargo_binstall_archive: true
# - os: windows
# arch: x86_64
# target: x86_64-pc-windows-msvc
Expand All @@ -42,6 +44,7 @@ jobs:
arch: x86_64
target: x86_64-unknown-freebsd
ext: ""
cargo_binstall_archive: false
# - os: freebsd
# arch: aarch64
# target: aarch64-unknown-freebsd
Expand Down Expand Up @@ -104,11 +107,24 @@ jobs:
run: cross +stable build --release --target ${{ matrix.target }}
- name: Prepare artifact
run: |
mkdir -p artifacts/${{ matrix.os }}-${{ matrix.arch }}
cp target/${{ matrix.target }}/release/${{ env.REPO_NAME }}${{ matrix.ext }} \
artifacts/${{ matrix.os }}-${{ matrix.arch }}/${{ env.REPO_NAME }}-${{ matrix.os }}-${{ matrix.arch }}${{ matrix.ext }}
sha256sum artifacts/${{ matrix.os }}-${{ matrix.arch }}/${{ env.REPO_NAME }}-${{ matrix.os }}-${{ matrix.arch }}${{ matrix.ext }} > \
artifacts/${{ matrix.os }}-${{ matrix.arch }}/${{ env.REPO_NAME }}-${{ matrix.os }}-${{ matrix.arch }}${{ matrix.ext }}.sha256
set -euo pipefail
version="${GITHUB_REF_NAME#v}"
artifact_dir="artifacts/${{ matrix.os }}-${{ matrix.arch }}"
binary_path="target/${{ matrix.target }}/release/${{ env.REPO_NAME }}${{ matrix.ext }}"
binary_name="${{ env.REPO_NAME }}-${{ matrix.os }}-${{ matrix.arch }}${{ matrix.ext }}"

mkdir -p "${artifact_dir}"
cp "${binary_path}" "${artifact_dir}/${binary_name}"
sha256sum "${artifact_dir}/${binary_name}" > \
"${artifact_dir}/${binary_name}.sha256"

if [ "${{ matrix.cargo_binstall_archive }}" = "true" ]; then
archive_name="${{ env.REPO_NAME }}-${version}-${{ matrix.target }}.tar.gz"
tar -C "target/${{ matrix.target }}/release" -czf \
"${artifact_dir}/${archive_name}" "${{ env.REPO_NAME }}${{ matrix.ext }}"
sha256sum "${artifact_dir}/${archive_name}" > \
"${artifact_dir}/${archive_name}.sha256"
fi
- name: Upload release artifact
uses: actions/upload-artifact@v4
with:
Expand Down
8 changes: 8 additions & 0 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -4,10 +4,18 @@ version = "0.3.1"
edition = "2024"
rust-version = "1.89"
license = "ISC"
repository = "https://github.com/leynos/mdtablefix"
description = """
`mdtablefix` unb0rks and reflows Markdown tables so that each column has a uniform width. When \
the `--wrap` option is used, it also wraps paragraphs and list items to 80 columns."""

[package.metadata.binstall]

[package.metadata.binstall.overrides.'cfg(all(target_os = "linux", any(target_arch = "x86_64", target_arch = "aarch64"), target_env = "gnu"))']
pkg-url = "{ repo }/releases/download/v{ version }/{ name }-{ version }-{ target }.tar.gz"
bin-dir = "{ bin }{ binary-ext }"
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

issue (bug_risk): The bin-dir value in the cargo-binstall metadata looks like a file path instead of a directory, which may break installation.

In cargo-binstall metadata, bin-dir should point to the directory inside the archive that contains the binary (e.g. "." or "bin"), not the full binary path. With bin-dir = "{ bin }{ binary-ext }", this expands to the full filename, while your .tar.gz places the binary at the archive root. To match the archive layout and let cargo-binstall find the binary, this should be bin-dir = "." (or another actual directory path if you change the archive structure).

pkg-fmt = "tgz"

[dependencies]
anyhow = "1"
clap = { version = "4", features = ["derive"] }
Expand Down
7 changes: 7 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,13 @@ Install via Cargo:
cargo install mdtablefix
```

On Linux `x86_64-unknown-linux-gnu` and `aarch64-unknown-linux-gnu`, install
the prebuilt release archive via `cargo-binstall`:

```bash
cargo binstall mdtablefix
```

Or clone the repository and build from source:

```bash
Expand Down
22 changes: 14 additions & 8 deletions docs/release-process.md
Original file line number Diff line number Diff line change
@@ -1,7 +1,8 @@
# Release Process

This project publishes prebuilt binaries for multiple operating systems and
architectures.
architectures. It also publishes `cargo-binstall` archives for the supported
Linux release targets.

The project targets the stable Rust `1.89.0` toolchain, as specified in
`rust-toolchain.toml`.
Expand All @@ -14,10 +15,7 @@ The GitHub Actions workflow `.github/workflows/release.yml` builds and uploads
binaries for:

- Linux (x86_64 and aarch64)
- FreeBSD (x86_64 and aarch64)
- macOS (x86_64 and aarch64)
- Windows (x86_64 and aarch64)
- OpenBSD (x86_64 and aarch64)
- FreeBSD (x86_64)

Releases start from tags named `v<major>.<minor>.<patch>`. The workflow checks
that the tag's version, without the leading `v`, matches the `Cargo.toml`
Expand All @@ -26,20 +24,28 @@ that the tag's version, without the leading `v`, matches the `Cargo.toml`
Each binary is named using the pattern `mdtablefix-<os>-<arch>` with an `.exe`
suffix on Windows.

For Linux `x86_64-unknown-linux-gnu` and `aarch64-unknown-linux-gnu`, the
workflow also produces `cargo-binstall` archives named
`mdtablefix-<version>-<target>.tar.gz`. Each archive contains the `mdtablefix`
binary at the archive root, matching the `Cargo.toml`
`[package.metadata.binstall]` configuration.

Binaries are uploaded as soon as they are built, so they are available from the
workflow run while other targets build.

## Workflow details

The `release.yml` workflow defines a matrix of operating system and
architecture combinations. Each entry includes the target triple used by
`cross` and a filename extension for Windows. During the build job, `cross`
compiles a release binary for every matrix row.
`cross` and whether the target also needs a `cargo-binstall` archive. During
the build job, `cross` compiles a release binary for every matrix row.

`cross` is installed from a specific git tag to avoid unexpected behaviour from
its main branch. Each binary is placed in an `artifacts/<os>-<arch>` directory
using the naming pattern `mdtablefix-<os>-<arch>[.exe]`. An SHA-256 checksum is
written alongside each binary for download verification.
written alongside each binary for download verification. The Linux
`cargo-binstall` targets additionally produce
`mdtablefix-<version>-<target>.tar.gz` plus a matching SHA-256 checksum.

After every build completes, the artefact is uploaded so that the GitHub
Actions interface provides it immediately. Once the matrix has finished, the
Expand Down
166 changes: 102 additions & 64 deletions src/code_emphasis.rs
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,8 @@
//! transformation should run before wrapping and footnote conversion so marker
//! adjacency is evaluated on the raw input.

use std::{iter::Peekable, vec::IntoIter};

use crate::{
textproc::process_text,
wrap::{Token, tokenize_markdown},
Expand Down Expand Up @@ -49,6 +51,103 @@ fn push_code(code: &str, out: &mut String) {
out.push_str(&fence);
}

/// Report whether `source` contains an inline-code delimiter directly
/// adjacent to an emphasis marker (any of `` `* ``, `` `_ ``, `` *` ``,
/// `` _` ``). Used as a cheap pre-scan so the merge pass can skip input
/// that cannot need fixing.
fn has_code_emphasis_adjacent(source: &str) -> bool {
    const ADJACENT_PAIRS: [&str; 4] = ["`*", "`_", "*`", "_`"];
    ADJACENT_PAIRS.iter().any(|pair| source.contains(pair))
}

/// Emit a text token, holding back trailing emphasis markers when the
/// following token is inline code.
///
/// When `next` is not a code token the raw text passes through
/// unchanged. Otherwise the text is split into leading markers, body,
/// and trailing markers: a marker-only token is deferred wholesale via
/// `pending`, while mixed text emits its lead and body immediately and
/// defers only the trailing markers.
fn handle_text_token<'a>(
    raw: &'a str,
    next: Option<&Token<'a>>,
    out: &mut String,
    pending: &mut &'a str,
) {
    let followed_by_code = matches!(next, Some(Token::Code { .. }));
    if !followed_by_code {
        out.push_str(raw);
        return;
    }

    match split_marks(raw) {
        // Marker-only text: defer the whole run until the code token.
        (lead, "", "") => *pending = lead,
        (lead, body, trail) => {
            out.push_str(lead);
            out.push_str(body);
            *pending = trail;
        }
    }
}

/// Fold a `pending`-marker / code / matching-marker sandwich into the
/// output in one step.
///
/// Succeeds only when the next token is marker-only text whose markers
/// equal `pending`; in that case the pending markers, the code span, and
/// the mirrored markers are emitted, `pending` is cleared, and the
/// matched token is consumed. Returns `true` on success so the caller
/// can skip its fallback path.
fn try_fold_matching_emphasis<'a>(
    tokens: &mut Peekable<IntoIter<Token<'a>>>,
    pending: &mut &'a str,
    code: &str,
    out: &mut String,
) -> bool {
    let next_text = match tokens.peek() {
        Some(Token::Text(text)) => *text,
        _ => return false,
    };

    let (lead, mid, trail) = split_marks(next_text);
    let mirrors_pending = mid.is_empty() && trail.is_empty() && lead == *pending;
    if !mirrors_pending {
        return false;
    }

    out.push_str(pending);
    push_code(code, out);
    out.push_str(lead);
    *pending = "";
    tokens.next();
    true
}

/// Resolve the emphasis markers to place around a code span.
///
/// Takes any markers deferred by the preceding text token (`pending`,
/// consumed via `mem::take`) and combines them with the leading markers
/// of the next text token, if any. Returns `(prefix, suffix, modified)`:
/// the markers to emit before and after the code, and whether the code
/// span was affected by adjacent markers at all (so the caller knows to
/// re-emit it via `push_code` rather than verbatim).
fn consume_code_affixes<'a>(
    tokens: &mut Peekable<IntoIter<Token<'a>>>,
    pending: &mut &'a str,
) -> (&'a str, &'a str, bool) {
    // Any deferred markers become the provisional prefix; their mere
    // presence means the code span is adjacent to emphasis.
    let mut prefix = std::mem::take(pending);
    let mut suffix = "";
    let mut modified = !prefix.is_empty();

    // Only a following text token can contribute further markers.
    let Some(Token::Text(next)) = tokens.peek_mut() else {
        return (prefix, suffix, modified);
    };

    let (lead, mid, _) = split_marks(next);
    if lead.is_empty() {
        return (prefix, suffix, modified);
    }

    modified = true;
    if prefix.is_empty() {
        // Markers only after the code: e.g. `code`** — use them as prefix.
        prefix = lead;
    } else if mid.is_empty() {
        // Marker-only follower closes the pending markers: **`code`**.
        suffix = lead;
    } else {
        // Pending markers AND a follower with both markers and content:
        // ambiguous nesting, so drop the prefix and let the next text
        // token keep its own markers (mixed surrounds stay untouched).
        prefix = "";
    }
    // Strip the consumed leading markers from the peeked token in place.
    *next = &next[lead.len()..];
    (prefix, suffix, modified)
}
Comment on lines +101 to +128
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟡 Minor

🧩 Analysis chain

🏁 Script executed:

#!/bin/bash
# Search for test cases covering consume_code_affixes edge cases
rg -n "consume_code_affixes\|code_emphasis" --type rust -A 5 -B 2 src/code_emphasis.rs

Repository: leynos/mdtablefix

Length of output: 43


🏁 Script executed:

# First, let's confirm the file exists and check the repository structure
fd -type f -name "*.rs" | head -20

Repository: leynos/mdtablefix

Length of output: 231


🏁 Script executed:

# Search more broadly for the file and function
git ls-files | grep -E "(code|emphasis)"

Repository: leynos/mdtablefix

Length of output: 156


🏁 Script executed:

# Look for test files that might contain tests for this function
git ls-files | grep -i test

Repository: leynos/mdtablefix

Length of output: 3034


🏁 Script executed:

# Check the size of the source file
wc -l src/code_emphasis.rs

Repository: leynos/mdtablefix

Length of output: 85


🏁 Script executed:

# Read the source file
cat -n src/code_emphasis.rs

Repository: leynos/mdtablefix

Length of output: 8421


🏁 Script executed:

# Check test file size and read it
wc -l tests/code_emphasis.rs

Repository: leynos/mdtablefix

Length of output: 87


🏁 Script executed:

# Read the test file
cat -n tests/code_emphasis.rs

Repository: leynos/mdtablefix

Length of output: 5151


🏁 Script executed:

# Check if there are test files with mixed emphasis patterns
grep -r "\\*\\*" tests/code_emphasis.rs tests/data/ | head -20

Repository: leynos/mdtablefix

Length of output: 2305


🏁 Script executed:

# Look for any edge case patterns in test data files
find tests/data -name "*emphasis*" -o -name "*code*" | xargs cat 2>/dev/null | head -50

Repository: leynos/mdtablefix

Length of output: 43


🏁 Script executed:

# Let's create a focused test to understand the edge case
cat > /tmp/test_edge_case.rs << 'EOF'
// Testing the specific edge case:
// What happens when prefix is non-empty AND mid is non-empty?
// 
// Example input that would trigger this:
// `code` followed by **bold** (where ** comes with text)

// split_marks("**bold**") returns ("**", "bold", "**")
// If pending = "**" from previous token
// Then: prefix="**" (non-empty)
//       lead="**" (non-empty)
//       mid="bold" (non-empty)
// This hits line 124: prefix = ""

// The question: is this the correct handling?
EOF
cat /tmp/test_edge_case.rs

Repository: leynos/mdtablefix

Length of output: 539


🏁 Script executed:

# Search for any documentation about the intended behaviour
grep -r "mixed" src/code_emphasis.rs

Repository: leynos/mdtablefix

Length of output: 43


🏁 Script executed:

# Look at the module-level documentation more carefully
head -20 src/code_emphasis.rs

Repository: leynos/mdtablefix

Length of output: 868


🏁 Script executed:

# Let me trace through a specific example to verify the logic
cat > /tmp/trace_logic.py << 'EOF'
# Simulating the consume_code_affixes logic

def split_marks(s):
    """Split emphasis markers at both ends"""
    first = next((i for i, c in enumerate(s) if c not in '*_'), len(s))
    last = next((i for i in range(len(s)-1, -1, -1) if s[i] not in '*_'), first-1) + 1
    if last <= first:
        last = first
    return s[:first], s[first:last], s[last:]

def consume_code_affixes_trace(pending_in, next_text):
    """Trace through the logic with examples"""
    prefix = pending_in
    suffix = ""
    modified = bool(pending_in)
    
    print(f"Input: pending='{pending_in}', next_text='{next_text}'")
    
    if not next_text:
        print(f"  No next text token")
        return prefix, suffix, modified
    
    lead, mid, trail = split_marks(next_text)
    print(f"  split_marks('{next_text}') = ('{lead}', '{mid}', '{trail}')")
    
    if not lead:
        print(f"  No leading marks, return early")
        return prefix, suffix, modified
    
    modified = True
    print(f"  Set modified=True")
    
    if not prefix:
        prefix = lead
        print(f"  prefix was empty, set prefix='{lead}'")
    elif not mid:
        suffix = lead
        print(f"  mid was empty, set suffix='{lead}'")
    else:
        prefix = ""
        print(f"  EDGE CASE: mid is not empty, reset prefix to '' (avoid mixed markers)")
    
    print(f"  Result: prefix='{prefix}', suffix='{suffix}', modified={modified}")
    return prefix, suffix, modified

# Test cases
print("=== Case 1: No pending, plain text ===")
consume_code_affixes_trace("", "text")

print("\n=== Case 2: Pending markers, next text is all markers ===")
consume_code_affixes_trace("**", "**")

print("\n=== Case 3: EDGE CASE - Pending markers, next text has markers AND content ===")
consume_code_affixes_trace("**", "**more**")

print("\n=== Case 4: EDGE CASE - Pending underscore, next text has asterisks ===")
consume_code_affixes_trace("_", "*more*")
EOF
python3 /tmp/trace_logic.py

Repository: leynos/mdtablefix

Length of output: 1048


🏁 Script executed:

# Let's create a comprehensive test to verify the actual behaviour with the example cases
cat > /tmp/verify_behavior.py << 'EOF'
# Test the actual scenario the review mentions

def trace_scenario(input_str, label):
    print(f"\n=== {label} ===")
    print(f"Input: {repr(input_str)}")
    # The tokenizer would break this into tokens
    # Then consume_code_affixes logic applies
    # We need to understand what the expected output should be
    print(f"(Requires understanding of tokenization)")

trace_scenario("**code**", "Simple case: both leading and trailing emphasis")
trace_scenario("`code`**text**", "Code followed by emphasis and text")
trace_scenario("**`code`**", "Emphasis wrapping code (no affixes needed)")
trace_scenario("`code`**", "Code followed by emphasis (no following text)")

# The review asks: does the edge case (line 124: prefix = "") handle **code** correctly?
# But **code** without surrounding backticks wouldn't be tokenized as code
# The function only processes Text tokens adjacent to Code tokens

# The actual concern seems to be: when there's pending emphasis from a Text token
# followed by a Code token, and then another Text token with both markers and content,
# should we keep or discard the pending emphasis?

print("\n=== Analysis of edge case ===")
print("When prefix is non-empty AND mid is non-empty (line 124):")
print("- prefix = '' clears the pending emphasis markers")
print("- modified = True still wraps the code")
print("- This avoids: **`code`**more** (ambiguous nesting)")
print("- Instead: `code`**more** (next text handles its own markers)")
EOF
python3 /tmp/verify_behavior.py

Repository: leynos/mdtablefix

Length of output: 804


🏁 Script executed:

# Let me check if there's test coverage specifically for the edge case scenario
grep -n "prefix\|mid\|suffix" src/code_emphasis.rs | head -20

Repository: leynos/mdtablefix

Length of output: 766


🏁 Script executed:

# Let me check if there are any tests that would trigger the line 124 edge case
# by looking for test inputs with pending emphasis + content + markers

cat > /tmp/analyze_tests.py << 'EOF'
# Analyzing test coverage for the edge case

test_cases = [
    ("`StepContext`** Enhancement (in **`crates/rstest-bdd/src/context.rs`**)**", 
     "Complex case with nested code and emphasis"),
    ("**`code`**", "Emphasis only around code"),
    ("`code` text", "Standalone code"),
]

print("Test Coverage Analysis:")
print("=" * 60)

for input_str, description in test_cases:
    print(f"\nTest: {description}")
    print(f"Input: {repr(input_str)}")
    
    # The edge case occurs when:
    # 1. There's pending emphasis from previous text token (prefix non-empty)
    # 2. Next token is code
    # 3. After code, there's text with BOTH markers AND content
    #    (i.e., split_marks returns non-empty lead AND non-empty mid)
    
    if "**" in input_str and "`" in input_str:
        print("  ✓ Contains both emphasis and code")
    
    # Check if it would trigger the edge case
    # The edge case: prefix non-empty AND mid non-empty
    # This happens when: pending emphasis + code + text with markers+content

print("\n" + "=" * 60)
print("\nEdge Case Scenario:")
print("- Pending emphasis markers from previous text token")
print("- Code token is processed")
print("- Next text token has split_marks returning:")
print("  - lead (markers): non-empty")
print("  - mid (content): non-empty")
print("  - This triggers line 124: prefix = ''")
print("\nNo explicit test case found for this specific scenario!")
EOF
python3 /tmp/analyze_tests.py

Repository: leynos/mdtablefix

Length of output: 809


Add explicit test coverage for the edge case where pending emphasis combines with code and marked content.

The logic at lines 119–125 intentionally handles three scenarios. When prefix is non-empty and the next text token's mid (content between markers) is also non-empty, the code resets prefix to empty (line 124). This prevents ambiguous marker nesting—avoiding patterns like **code**more**.

This behaviour is by design per the module documentation ("Mixed surrounding markers are left untouched"), but it lacks explicit test coverage. Add a test case triggering the scenario where pending emphasis from one text token combines with code followed by another text token containing both markers and content. This confirms the edge case handling is correct.

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@src/code_emphasis.rs` around lines 101 - 128, Add a unit test that exercises
the edge case in consume_code_affixes where a non-empty pending prefix combines
with a next Text token whose split_marks(mid) is non-empty so the function
resets prefix to empty (the branch with "else { prefix = \"\"; }"). Construct
tokens (using the Token enum and IntoIter<Token<'a>> in the same way other tests
do) so pending is a non-empty marker string, the next Token::Text contains
lead+mid+suffix (i.e. both markers and content), and assert the returned tuple
is ("" /*empty prefix*/, lead or suffix as appropriate, true) and that the next
text is sliced by lead.len(); place the test alongside existing tests for
code_emphasis.rs to cover the documented "Mixed surrounding markers are left
untouched" behavior.


/// Emit a code token, merging it with any surrounding emphasis markers.
///
/// First attempts the symmetric fold (`pending` markers mirrored by a
/// marker-only follower); failing that, resolves prefix/suffix markers
/// via `consume_code_affixes` and emits either a re-rendered code span
/// (when markers touched it) or the raw token verbatim.
fn handle_code_token<'a>(
    tokens: &mut Peekable<IntoIter<Token<'a>>>,
    code_token: (&'a str, &'a str),
    out: &mut String,
    pending: &mut &'a str,
) {
    let (raw, code) = code_token;
    let folded = !pending.is_empty() && try_fold_matching_emphasis(tokens, pending, code, out);
    if folded {
        return;
    }

    let (prefix, suffix, modified) = consume_code_affixes(tokens, pending);
    out.push_str(prefix);
    match modified {
        true => push_code(code, out),
        false => out.push_str(raw),
    }
    out.push_str(suffix);
}

/// Merge contiguous code and emphasis spans.
///
/// Groups of emphasis markers and inline code with no separating spaces are
Expand All @@ -75,78 +174,17 @@ pub fn fix_code_emphasis(lines: &[String]) -> Vec<String> {
return vec![String::new(); lines.len()];
}
let source = lines.join("\n");
if !source.contains("`*")
&& !source.contains("`_")
&& !source.contains("*`")
&& !source.contains("_`")
{
if !has_code_emphasis_adjacent(&source) {
return lines.to_vec();
}
let mut tokens = tokenize_markdown(&source).into_iter().peekable();
let mut out = String::new();
let mut pending = "";
while let Some(token) = tokens.next() {
match token {
Token::Text(raw) => {
if tokens
.peek()
.is_some_and(|t| matches!(t, Token::Code { .. }))
{
let (lead, body, trail) = split_marks(raw);
if body.is_empty() && trail.is_empty() {
pending = lead;
} else {
out.push_str(lead);
out.push_str(body);
pending = trail;
}
} else {
out.push_str(raw);
}
}
Token::Text(raw) => handle_text_token(raw, tokens.peek(), &mut out, &mut pending),
Token::Code { raw, code, .. } => {
if !pending.is_empty()
&& let Some(Token::Text(next)) = tokens.peek()
{
let (lead, mid, trail) = split_marks(next);
if mid.is_empty() && trail.is_empty() && lead == pending {
out.push_str(pending);
push_code(code, &mut out);
out.push_str(lead);
pending = "";
tokens.next();
continue;
}
}
let mut prefix = pending;
let mut suffix = "";
let mut modified = !pending.is_empty();
pending = "";
if let Some(Token::Text(next)) = tokens.peek_mut() {
let (lead, mid, _) = split_marks(next);
if !lead.is_empty() {
modified = true;
if prefix.is_empty() {
prefix = lead;
} else if mid.is_empty() {
suffix = lead;
} else {
prefix = "";
}
*next = &next[lead.len()..];
}
}
if !prefix.is_empty() {
out.push_str(prefix);
}
if modified {
push_code(code, &mut out);
} else {
out.push_str(raw);
}
if !suffix.is_empty() {
out.push_str(suffix);
}
handle_code_token(&mut tokens, (raw, code), &mut out, &mut pending);
}
Token::Fence(f) => out.push_str(f),
Token::Newline => out.push('\n'),
Expand Down
35 changes: 22 additions & 13 deletions src/fences.rs
Original file line number Diff line number Diff line change
Expand Up @@ -120,6 +120,24 @@ fn attach_specifier_to_fence(fence_line: &str, specifier: &str, spec_indent: &st
format!("{final_indent}```{specifier}")
}

/// Locate the first non-blank line at or after `start` and return its
/// index if it is a code fence; otherwise return `None` (including when
/// only blank lines remain).
fn orphan_specifier_target(lines: &[String], start: usize) -> Option<usize> {
    let index = (start..lines.len()).find(|&i| !lines[i].trim().is_empty())?;
    FENCE_RE.captures(&lines[index]).map(|_| index)
}

/// Like [`orphan_specifier_target`], but only returns the fence index
/// when that fence carries no language specifier (a "null" language),
/// i.e. when it can safely adopt the orphaned specifier.
fn orphan_specifier_target_without_language(lines: &[String], start: usize) -> Option<usize> {
    orphan_specifier_target(lines, start).filter(|&target| {
        FENCE_RE
            .captures(&lines[target])
            .is_some_and(|cap| is_null_lang(cap.get(3).map_or("", |m| m.as_str())))
    })
}

/// Attach orphaned language specifiers to opening fences.
///
/// After compressing fences, a language may appear on its own line directly
Expand Down Expand Up @@ -156,19 +174,10 @@ pub fn attach_orphan_specifiers(lines: &[String]) -> Vec<String> {
let (spec, indent) = normalize_specifier(line);
if ORPHAN_LANG_RE.is_match(&spec) && out.last().is_none_or(|l: &String| l.trim().is_empty())
{
let mut j = i + 1;
while j < lines.len() && lines[j].trim().is_empty() {
j += 1;
}
if j < lines.len()
&& let Some(cap) = FENCE_RE.captures(&lines[j])
{
let lang = cap.get(3).map_or("", |m| m.as_str());
if is_null_lang(lang) {
out.push(attach_specifier_to_fence(&lines[j], &spec, &indent));
i = j + 1;
continue;
}
if let Some(target) = orphan_specifier_target_without_language(lines, i + 1) {
out.push(attach_specifier_to_fence(&lines[target], &spec, &indent));
i = target + 1;
continue;
}
out.push(line.clone());
i += 1;
Expand Down
Loading
Loading