diff --git a/benchmarks/pandas/bench_dataframe_corr.py b/benchmarks/pandas/bench_dataframe_corr.py new file mode 100644 index 00000000..f724a4b2 --- /dev/null +++ b/benchmarks/pandas/bench_dataframe_corr.py @@ -0,0 +1,31 @@ +"""Benchmark: DataFrame correlation matrix on 10k-row x 5-column DataFrame""" +import json, time +import numpy as np +import pandas as pd + +ROWS = 10_000 +WARMUP = 3 +ITERATIONS = 10 + +df = pd.DataFrame({ + "A": np.sin(np.arange(ROWS) * 0.01), + "B": np.cos(np.arange(ROWS) * 0.01), + "C": np.sin(np.arange(ROWS) * 0.02), + "D": np.cos(np.arange(ROWS) * 0.02), + "E": np.sin(np.arange(ROWS) * 0.03), +}) + +for _ in range(WARMUP): + df.corr() + +start = time.perf_counter() +for _ in range(ITERATIONS): + df.corr() +total = (time.perf_counter() - start) * 1000 + +print(json.dumps({ + "function": "dataframe_corr", + "mean_ms": total / ITERATIONS, + "iterations": ITERATIONS, + "total_ms": total, +})) diff --git a/benchmarks/pandas/bench_expanding_mean.py b/benchmarks/pandas/bench_expanding_mean.py new file mode 100644 index 00000000..536fd8b7 --- /dev/null +++ b/benchmarks/pandas/bench_expanding_mean.py @@ -0,0 +1,26 @@ +"""Benchmark: expanding mean on 100k-element Series""" +import json, time +import numpy as np +import pandas as pd + +ROWS = 100_000 +WARMUP = 3 +ITERATIONS = 10 + +data = np.sin(np.arange(ROWS) * 0.01) +s = pd.Series(data) + +for _ in range(WARMUP): + s.expanding().mean() + +start = time.perf_counter() +for _ in range(ITERATIONS): + s.expanding().mean() +total = (time.perf_counter() - start) * 1000 + +print(json.dumps({ + "function": "expanding_mean", + "mean_ms": total / ITERATIONS, + "iterations": ITERATIONS, + "total_ms": total, +})) diff --git a/benchmarks/pandas/bench_melt.py b/benchmarks/pandas/bench_melt.py new file mode 100644 index 00000000..25284b6f --- /dev/null +++ b/benchmarks/pandas/bench_melt.py @@ -0,0 +1,29 @@ +"""Benchmark: melt (wide to long) on 10k-row DataFrame""" +import json, time +import numpy as np +import 
pandas as pd + +ROWS = 10_000 +WARMUP = 3 +ITERATIONS = 10 + +df = pd.DataFrame({ + "A": np.arange(ROWS) * 0.1, + "B": np.arange(ROWS) * 0.2, + "C": np.arange(ROWS) * 0.3, +}) + +for _ in range(WARMUP): + df.melt(value_vars=["A", "B", "C"]) + +start = time.perf_counter() +for _ in range(ITERATIONS): + df.melt(value_vars=["A", "B", "C"]) +total = (time.perf_counter() - start) * 1000 + +print(json.dumps({ + "function": "melt", + "mean_ms": total / ITERATIONS, + "iterations": ITERATIONS, + "total_ms": total, +})) diff --git a/benchmarks/pandas/bench_min_max_normalize.py b/benchmarks/pandas/bench_min_max_normalize.py new file mode 100644 index 00000000..bb93847c --- /dev/null +++ b/benchmarks/pandas/bench_min_max_normalize.py @@ -0,0 +1,26 @@ +"""Benchmark: min-max normalization on 100k-element Series""" +import json, time +import numpy as np +import pandas as pd + +ROWS = 100_000 +WARMUP = 3 +ITERATIONS = 10 + +data = np.sin(np.arange(ROWS) * 0.01) * 100 + 50 +s = pd.Series(data) + +for _ in range(WARMUP): + (s - s.min()) / (s.max() - s.min()) + +start = time.perf_counter() +for _ in range(ITERATIONS): + (s - s.min()) / (s.max() - s.min()) +total = (time.perf_counter() - start) * 1000 + +print(json.dumps({ + "function": "min_max_normalize", + "mean_ms": total / ITERATIONS, + "iterations": ITERATIONS, + "total_ms": total, +})) diff --git a/benchmarks/pandas/bench_pearson_corr.py b/benchmarks/pandas/bench_pearson_corr.py new file mode 100644 index 00000000..454aa7f4 --- /dev/null +++ b/benchmarks/pandas/bench_pearson_corr.py @@ -0,0 +1,28 @@ +"""Benchmark: Pearson correlation between two 100k-element Series""" +import json, time +import numpy as np +import pandas as pd + +ROWS = 100_000 +WARMUP = 3 +ITERATIONS = 10 + +a = np.sin(np.arange(ROWS) * 0.01) +b = np.cos(np.arange(ROWS) * 0.01) +sa = pd.Series(a) +sb = pd.Series(b) + +for _ in range(WARMUP): + sa.corr(sb) + +start = time.perf_counter() +for _ in range(ITERATIONS): + sa.corr(sb) +total = (time.perf_counter() - 
start) * 1000 + +print(json.dumps({ + "function": "pearson_corr", + "mean_ms": total / ITERATIONS, + "iterations": ITERATIONS, + "total_ms": total, +})) diff --git a/benchmarks/pandas/bench_rolling_std.py b/benchmarks/pandas/bench_rolling_std.py new file mode 100644 index 00000000..e4dd5099 --- /dev/null +++ b/benchmarks/pandas/bench_rolling_std.py @@ -0,0 +1,26 @@ +"""Benchmark: rolling standard deviation with window=100 on 100k-element Series""" +import json, time +import numpy as np +import pandas as pd + +ROWS = 100_000 +WARMUP = 3 +ITERATIONS = 10 + +data = np.sin(np.arange(ROWS) * 0.01) +s = pd.Series(data) + +for _ in range(WARMUP): + s.rolling(100).std() + +start = time.perf_counter() +for _ in range(ITERATIONS): + s.rolling(100).std() +total = (time.perf_counter() - start) * 1000 + +print(json.dumps({ + "function": "rolling_std", + "mean_ms": total / ITERATIONS, + "iterations": ITERATIONS, + "total_ms": total, +})) diff --git a/benchmarks/pandas/bench_rolling_sum.py b/benchmarks/pandas/bench_rolling_sum.py new file mode 100644 index 00000000..1a04a3ec --- /dev/null +++ b/benchmarks/pandas/bench_rolling_sum.py @@ -0,0 +1,26 @@ +"""Benchmark: rolling sum with window=100 on 100k-element Series""" +import json, time +import numpy as np +import pandas as pd + +ROWS = 100_000 +WARMUP = 3 +ITERATIONS = 10 + +data = np.sin(np.arange(ROWS) * 0.01) +s = pd.Series(data) + +for _ in range(WARMUP): + s.rolling(100).sum() + +start = time.perf_counter() +for _ in range(ITERATIONS): + s.rolling(100).sum() +total = (time.perf_counter() - start) * 1000 + +print(json.dumps({ + "function": "rolling_sum", + "mean_ms": total / ITERATIONS, + "iterations": ITERATIONS, + "total_ms": total, +})) diff --git a/benchmarks/pandas/bench_series_nlargest.py b/benchmarks/pandas/bench_series_nlargest.py new file mode 100644 index 00000000..39d07d73 --- /dev/null +++ b/benchmarks/pandas/bench_series_nlargest.py @@ -0,0 +1,26 @@ +"""Benchmark: nlargest on 100k-element Series (top 1000)""" 
+import json, time +import numpy as np +import pandas as pd + +ROWS = 100_000 +WARMUP = 3 +ITERATIONS = 10 + +data = np.sin(np.arange(ROWS) * 0.01) * 1000 +s = pd.Series(data) + +for _ in range(WARMUP): + s.nlargest(1000) + +start = time.perf_counter() +for _ in range(ITERATIONS): + s.nlargest(1000) +total = (time.perf_counter() - start) * 1000 + +print(json.dumps({ + "function": "series_nlargest", + "mean_ms": total / ITERATIONS, + "iterations": ITERATIONS, + "total_ms": total, +})) diff --git a/benchmarks/pandas/bench_series_rank.py b/benchmarks/pandas/bench_series_rank.py new file mode 100644 index 00000000..378445ac --- /dev/null +++ b/benchmarks/pandas/bench_series_rank.py @@ -0,0 +1,26 @@ +"""Benchmark: Series rank on 100k-element Series""" +import json, time +import numpy as np +import pandas as pd + +ROWS = 100_000 +WARMUP = 3 +ITERATIONS = 10 + +data = np.sin(np.arange(ROWS) * 0.01) * 1000 +s = pd.Series(data) + +for _ in range(WARMUP): + s.rank() + +start = time.perf_counter() +for _ in range(ITERATIONS): + s.rank() +total = (time.perf_counter() - start) * 1000 + +print(json.dumps({ + "function": "series_rank", + "mean_ms": total / ITERATIONS, + "iterations": ITERATIONS, + "total_ms": total, +})) diff --git a/benchmarks/pandas/bench_to_csv.py b/benchmarks/pandas/bench_to_csv.py new file mode 100644 index 00000000..c2e0298a --- /dev/null +++ b/benchmarks/pandas/bench_to_csv.py @@ -0,0 +1,29 @@ +"""Benchmark: to_csv — serialize a 10k-row DataFrame to CSV string""" +import json, time +import numpy as np +import pandas as pd + +ROWS = 10_000 +WARMUP = 3 +ITERATIONS = 10 + +df = pd.DataFrame({ + "id": np.arange(ROWS, dtype=float), + "value": np.arange(ROWS) * 1.1, + "score": np.sin(np.arange(ROWS) * 0.01), +}) + +for _ in range(WARMUP): + df.to_csv(index=False) + +start = time.perf_counter() +for _ in range(ITERATIONS): + df.to_csv(index=False) +total = (time.perf_counter() - start) * 1000 + +print(json.dumps({ + "function": "to_csv", + "mean_ms": 
total / ITERATIONS, + "iterations": ITERATIONS, + "total_ms": total, +})) diff --git a/benchmarks/pandas/bench_to_json.py b/benchmarks/pandas/bench_to_json.py new file mode 100644 index 00000000..d76578da --- /dev/null +++ b/benchmarks/pandas/bench_to_json.py @@ -0,0 +1,29 @@ +"""Benchmark: to_json — serialize a 10k-row DataFrame to JSON string""" +import json, time +import numpy as np +import pandas as pd + +ROWS = 10_000 +WARMUP = 3 +ITERATIONS = 10 + +df = pd.DataFrame({ + "id": np.arange(ROWS, dtype=float), + "value": np.arange(ROWS) * 1.1, + "score": np.sin(np.arange(ROWS) * 0.01), +}) + +for _ in range(WARMUP): + df.to_json(orient="records") + +start = time.perf_counter() +for _ in range(ITERATIONS): + df.to_json(orient="records") +total = (time.perf_counter() - start) * 1000 + +print(json.dumps({ + "function": "to_json", + "mean_ms": total / ITERATIONS, + "iterations": ITERATIONS, + "total_ms": total, +})) diff --git a/benchmarks/pandas/bench_zscore.py b/benchmarks/pandas/bench_zscore.py new file mode 100644 index 00000000..b6050e5a --- /dev/null +++ b/benchmarks/pandas/bench_zscore.py @@ -0,0 +1,26 @@ +"""Benchmark: zscore normalization on 100k-element Series""" +import json, time +import numpy as np +import pandas as pd + +ROWS = 100_000 +WARMUP = 3 +ITERATIONS = 10 + +data = np.sin(np.arange(ROWS) * 0.01) * 100 + 50 +s = pd.Series(data) + +for _ in range(WARMUP): + (s - s.mean()) / s.std() + +start = time.perf_counter() +for _ in range(ITERATIONS): + (s - s.mean()) / s.std() +total = (time.perf_counter() - start) * 1000 + +print(json.dumps({ + "function": "zscore", + "mean_ms": total / ITERATIONS, + "iterations": ITERATIONS, + "total_ms": total, +})) diff --git a/benchmarks/tsb/bench_dataframe_corr.ts b/benchmarks/tsb/bench_dataframe_corr.ts new file mode 100644 index 00000000..40e9cf4b --- /dev/null +++ b/benchmarks/tsb/bench_dataframe_corr.ts @@ -0,0 +1,35 @@ +/** + * Benchmark: DataFrame correlation matrix on 10k-row x 5-column DataFrame + */ +import { 
DataFrame, dataFrameCorr } from "../../src/index.js"; + +const ROWS = 10_000; +const WARMUP = 3; +const ITERATIONS = 10; + +const df = new DataFrame({ + A: Float64Array.from({ length: ROWS }, (_, i) => Math.sin(i * 0.01)), + B: Float64Array.from({ length: ROWS }, (_, i) => Math.cos(i * 0.01)), + C: Float64Array.from({ length: ROWS }, (_, i) => Math.sin(i * 0.02)), + D: Float64Array.from({ length: ROWS }, (_, i) => Math.cos(i * 0.02)), + E: Float64Array.from({ length: ROWS }, (_, i) => Math.sin(i * 0.03)), +}); + +for (let i = 0; i < WARMUP; i++) { + dataFrameCorr(df); +} + +const start = performance.now(); +for (let i = 0; i < ITERATIONS; i++) { + dataFrameCorr(df); +} +const total = performance.now() - start; + +console.log( + JSON.stringify({ + function: "dataframe_corr", + mean_ms: total / ITERATIONS, + iterations: ITERATIONS, + total_ms: total, + }), +); diff --git a/benchmarks/tsb/bench_expanding_mean.ts b/benchmarks/tsb/bench_expanding_mean.ts new file mode 100644 index 00000000..4ea94a4a --- /dev/null +++ b/benchmarks/tsb/bench_expanding_mean.ts @@ -0,0 +1,30 @@ +/** + * Benchmark: expanding mean on 100k-element Series + */ +import { Series } from "../../src/index.js"; + +const ROWS = 100_000; +const WARMUP = 3; +const ITERATIONS = 10; + +const data = Float64Array.from({ length: ROWS }, (_, i) => Math.sin(i * 0.01)); +const s = new Series(data); + +for (let i = 0; i < WARMUP; i++) { + s.expanding().mean(); +} + +const start = performance.now(); +for (let i = 0; i < ITERATIONS; i++) { + s.expanding().mean(); +} +const total = performance.now() - start; + +console.log( + JSON.stringify({ + function: "expanding_mean", + mean_ms: total / ITERATIONS, + iterations: ITERATIONS, + total_ms: total, + }), +); diff --git a/benchmarks/tsb/bench_melt.ts b/benchmarks/tsb/bench_melt.ts new file mode 100644 index 00000000..f30243ac --- /dev/null +++ b/benchmarks/tsb/bench_melt.ts @@ -0,0 +1,32 @@ +/** + * Benchmark: melt (wide to long) on 10k-row DataFrame + */ +import { 
DataFrame, melt } from "../../src/index.js"; + +const ROWS = 10_000; +const WARMUP = 3; +const ITERATIONS = 10; + +const a = Float64Array.from({ length: ROWS }, (_, i) => i * 0.1); +const b = Float64Array.from({ length: ROWS }, (_, i) => i * 0.2); +const c = Float64Array.from({ length: ROWS }, (_, i) => i * 0.3); +const df = new DataFrame({ A: a, B: b, C: c }); + +for (let i = 0; i < WARMUP; i++) { + melt(df, { value_vars: ["A", "B", "C"] }); +} + +const start = performance.now(); +for (let i = 0; i < ITERATIONS; i++) { + melt(df, { value_vars: ["A", "B", "C"] }); +} +const total = performance.now() - start; + +console.log( + JSON.stringify({ + function: "melt", + mean_ms: total / ITERATIONS, + iterations: ITERATIONS, + total_ms: total, + }), +); diff --git a/benchmarks/tsb/bench_min_max_normalize.ts b/benchmarks/tsb/bench_min_max_normalize.ts new file mode 100644 index 00000000..35267b26 --- /dev/null +++ b/benchmarks/tsb/bench_min_max_normalize.ts @@ -0,0 +1,30 @@ +/** + * Benchmark: min-max normalization on 100k-element Series + */ +import { Series, minMaxNormalize } from "../../src/index.js"; + +const ROWS = 100_000; +const WARMUP = 3; +const ITERATIONS = 10; + +const data = Float64Array.from({ length: ROWS }, (_, i) => Math.sin(i * 0.01) * 100 + 50); +const s = new Series(data); + +for (let i = 0; i < WARMUP; i++) { + minMaxNormalize(s); +} + +const start = performance.now(); +for (let i = 0; i < ITERATIONS; i++) { + minMaxNormalize(s); +} +const total = performance.now() - start; + +console.log( + JSON.stringify({ + function: "min_max_normalize", + mean_ms: total / ITERATIONS, + iterations: ITERATIONS, + total_ms: total, + }), +); diff --git a/benchmarks/tsb/bench_pearson_corr.ts b/benchmarks/tsb/bench_pearson_corr.ts new file mode 100644 index 00000000..6563c5a5 --- /dev/null +++ b/benchmarks/tsb/bench_pearson_corr.ts @@ -0,0 +1,32 @@ +/** + * Benchmark: Pearson correlation between two 100k-element Series + */ +import { Series, pearsonCorr } from 
"../../src/index.js"; + +const ROWS = 100_000; +const WARMUP = 3; +const ITERATIONS = 10; + +const a = Float64Array.from({ length: ROWS }, (_, i) => Math.sin(i * 0.01)); +const b = Float64Array.from({ length: ROWS }, (_, i) => Math.cos(i * 0.01)); +const sa = new Series(a); +const sb = new Series(b); + +for (let i = 0; i < WARMUP; i++) { + pearsonCorr(sa, sb); +} + +const start = performance.now(); +for (let i = 0; i < ITERATIONS; i++) { + pearsonCorr(sa, sb); +} +const total = performance.now() - start; + +console.log( + JSON.stringify({ + function: "pearson_corr", + mean_ms: total / ITERATIONS, + iterations: ITERATIONS, + total_ms: total, + }), +); diff --git a/benchmarks/tsb/bench_rolling_std.ts b/benchmarks/tsb/bench_rolling_std.ts new file mode 100644 index 00000000..2cd7d8cc --- /dev/null +++ b/benchmarks/tsb/bench_rolling_std.ts @@ -0,0 +1,30 @@ +/** + * Benchmark: rolling standard deviation with window=100 on 100k-element Series + */ +import { Series } from "../../src/index.js"; + +const ROWS = 100_000; +const WARMUP = 3; +const ITERATIONS = 10; + +const data = Float64Array.from({ length: ROWS }, (_, i) => Math.sin(i * 0.01)); +const s = new Series(data); + +for (let i = 0; i < WARMUP; i++) { + s.rolling(100).std(); +} + +const start = performance.now(); +for (let i = 0; i < ITERATIONS; i++) { + s.rolling(100).std(); +} +const total = performance.now() - start; + +console.log( + JSON.stringify({ + function: "rolling_std", + mean_ms: total / ITERATIONS, + iterations: ITERATIONS, + total_ms: total, + }), +); diff --git a/benchmarks/tsb/bench_rolling_sum.ts b/benchmarks/tsb/bench_rolling_sum.ts new file mode 100644 index 00000000..e5104998 --- /dev/null +++ b/benchmarks/tsb/bench_rolling_sum.ts @@ -0,0 +1,30 @@ +/** + * Benchmark: rolling sum with window=100 on 100k-element Series + */ +import { Series } from "../../src/index.js"; + +const ROWS = 100_000; +const WARMUP = 3; +const ITERATIONS = 10; + +const data = Float64Array.from({ length: ROWS }, (_, i) => 
Math.sin(i * 0.01)); +const s = new Series(data); + +for (let i = 0; i < WARMUP; i++) { + s.rolling(100).sum(); +} + +const start = performance.now(); +for (let i = 0; i < ITERATIONS; i++) { + s.rolling(100).sum(); +} +const total = performance.now() - start; + +console.log( + JSON.stringify({ + function: "rolling_sum", + mean_ms: total / ITERATIONS, + iterations: ITERATIONS, + total_ms: total, + }), +); diff --git a/benchmarks/tsb/bench_series_nlargest.ts b/benchmarks/tsb/bench_series_nlargest.ts new file mode 100644 index 00000000..faab5fd3 --- /dev/null +++ b/benchmarks/tsb/bench_series_nlargest.ts @@ -0,0 +1,30 @@ +/** + * Benchmark: nlargest on 100k-element Series (top 1000) + */ +import { Series, nlargestSeries } from "../../src/index.js"; + +const ROWS = 100_000; +const WARMUP = 3; +const ITERATIONS = 10; + +const data = Float64Array.from({ length: ROWS }, (_, i) => Math.sin(i * 0.01) * 1000); +const s = new Series(data); + +for (let i = 0; i < WARMUP; i++) { + nlargestSeries(s, 1000); +} + +const start = performance.now(); +for (let i = 0; i < ITERATIONS; i++) { + nlargestSeries(s, 1000); +} +const total = performance.now() - start; + +console.log( + JSON.stringify({ + function: "series_nlargest", + mean_ms: total / ITERATIONS, + iterations: ITERATIONS, + total_ms: total, + }), +); diff --git a/benchmarks/tsb/bench_series_rank.ts b/benchmarks/tsb/bench_series_rank.ts new file mode 100644 index 00000000..10b05127 --- /dev/null +++ b/benchmarks/tsb/bench_series_rank.ts @@ -0,0 +1,30 @@ +/** + * Benchmark: Series rank on 100k-element Series + */ +import { Series, rankSeries } from "../../src/index.js"; + +const ROWS = 100_000; +const WARMUP = 3; +const ITERATIONS = 10; + +const data = Float64Array.from({ length: ROWS }, (_, i) => Math.sin(i * 0.01) * 1000); +const s = new Series(data); + +for (let i = 0; i < WARMUP; i++) { + rankSeries(s); +} + +const start = performance.now(); +for (let i = 0; i < ITERATIONS; i++) { + rankSeries(s); +} +const total = 
performance.now() - start; + +console.log( + JSON.stringify({ + function: "series_rank", + mean_ms: total / ITERATIONS, + iterations: ITERATIONS, + total_ms: total, + }), +); diff --git a/benchmarks/tsb/bench_to_csv.ts b/benchmarks/tsb/bench_to_csv.ts new file mode 100644 index 00000000..fb1ce422 --- /dev/null +++ b/benchmarks/tsb/bench_to_csv.ts @@ -0,0 +1,33 @@ +/** + * Benchmark: toCsv — serialize a 10k-row DataFrame to CSV string + */ +import { DataFrame, toCsv } from "../../src/index.js"; + +const ROWS = 10_000; +const WARMUP = 3; +const ITERATIONS = 10; + +const df = new DataFrame({ + id: Float64Array.from({ length: ROWS }, (_, i) => i), + value: Float64Array.from({ length: ROWS }, (_, i) => i * 1.1), + score: Float64Array.from({ length: ROWS }, (_, i) => Math.sin(i * 0.01)), +}); + +for (let i = 0; i < WARMUP; i++) { + toCsv(df); +} + +const start = performance.now(); +for (let i = 0; i < ITERATIONS; i++) { + toCsv(df); +} +const total = performance.now() - start; + +console.log( + JSON.stringify({ + function: "to_csv", + mean_ms: total / ITERATIONS, + iterations: ITERATIONS, + total_ms: total, + }), +); diff --git a/benchmarks/tsb/bench_to_json.ts b/benchmarks/tsb/bench_to_json.ts new file mode 100644 index 00000000..ed8c22a2 --- /dev/null +++ b/benchmarks/tsb/bench_to_json.ts @@ -0,0 +1,33 @@ +/** + * Benchmark: toJson — serialize a 10k-row DataFrame to JSON string + */ +import { DataFrame, toJson } from "../../src/index.js"; + +const ROWS = 10_000; +const WARMUP = 3; +const ITERATIONS = 10; + +const df = new DataFrame({ + id: Float64Array.from({ length: ROWS }, (_, i) => i), + value: Float64Array.from({ length: ROWS }, (_, i) => i * 1.1), + score: Float64Array.from({ length: ROWS }, (_, i) => Math.sin(i * 0.01)), +}); + +for (let i = 0; i < WARMUP; i++) { + toJson(df); +} + +const start = performance.now(); +for (let i = 0; i < ITERATIONS; i++) { + toJson(df); +} +const total = performance.now() - start; + +console.log( + JSON.stringify({ + function: 
"to_json", + mean_ms: total / ITERATIONS, + iterations: ITERATIONS, + total_ms: total, + }), +); diff --git a/benchmarks/tsb/bench_zscore.ts b/benchmarks/tsb/bench_zscore.ts new file mode 100644 index 00000000..6e856325 --- /dev/null +++ b/benchmarks/tsb/bench_zscore.ts @@ -0,0 +1,30 @@ +/** + * Benchmark: zscore normalization on 100k-element Series + */ +import { Series, zscore } from "../../src/index.js"; + +const ROWS = 100_000; +const WARMUP = 3; +const ITERATIONS = 10; + +const data = Float64Array.from({ length: ROWS }, (_, i) => Math.sin(i * 0.01) * 100 + 50); +const s = new Series(data); + +for (let i = 0; i < WARMUP; i++) { + zscore(s); +} + +const start = performance.now(); +for (let i = 0; i < ITERATIONS; i++) { + zscore(s); +} +const total = performance.now() - start; + +console.log( + JSON.stringify({ + function: "zscore", + mean_ms: total / ITERATIONS, + iterations: ITERATIONS, + total_ms: total, + }), +);