diff --git a/playground/at_iat.html b/playground/at_iat.html new file mode 100644 index 00000000..208da4d3 --- /dev/null +++ b/playground/at_iat.html @@ -0,0 +1,88 @@ + + + + + + at_iat — tsb playground + + + + ← Back to playground index +

at_iat — fast scalar access for Series and DataFrame

+

+ Fast single-cell accessors that mirror the pandas .at and .iat + indexers. Use these when you need a single scalar value — they are clearer and faster + than .loc / .iloc for single-element access. +

+ +

seriesAt — access by label

+

Python pandas equivalent:

+
import pandas as pd
+s = pd.Series([10, 20, 30], index=["a", "b", "c"])
+s.at["b"]   # 20
+
+

tsb equivalent:

+
import { Series, seriesAt } from "tsb";
+
+const s = new Series({ data: [10, 20, 30], index: ["a", "b", "c"] });
+seriesAt(s, "b");   // 20
+seriesAt(s, "a");   // 10
+
+ +

seriesIat — access by integer position

+

Python pandas equivalent:

+
s.iat[2]   # 30
+s.iat[-1]  # 30  (negative indexing)
+
+

tsb equivalent:

+
import { seriesIat } from "tsb";
+
+seriesIat(s, 2);   // 30
+seriesIat(s, -1);  // 30
+
+ +

dataFrameAt — access by row label and column name

+

Python pandas equivalent:

+
df = pd.DataFrame({"x": [1, 2], "y": [3, 4]}, index=["r0", "r1"])
+df.at["r1", "x"]   # 2
+
+

tsb equivalent:

+
import { DataFrame, dataFrameAt } from "tsb";
+
+const df = DataFrame.fromColumns(
+  { x: [1, 2], y: [3, 4] },
+  { index: ["r0", "r1"] },
+);
+dataFrameAt(df, "r1", "x");   // 2
+dataFrameAt(df, "r0", "y");   // 3
+
+ +

dataFrameIat — access by integer row and column position

+

Python pandas equivalent:

+
df.iat[0, 1]   # 3  (row 0, col 1 = "y")
+df.iat[1, -1]  # 4  (last col)
+
+

tsb equivalent:

+
import { dataFrameIat } from "tsb";
+
+dataFrameIat(df, 0, 1);   // 3  (row 0, column index 1 = "y")
+dataFrameIat(df, 1, -1);  // 4  (last column, row 1)
+
+ +

Summary

+
// seriesAt(s, label)             — label-based scalar access
+// seriesIat(s, i)                — position-based scalar access
+// dataFrameAt(df, rowLabel, col) — label × label scalar access
+// dataFrameIat(df, rowInt, col)  — position × position scalar access
+
+ + diff --git a/playground/between.html b/playground/between.html new file mode 100644 index 00000000..2f907daa --- /dev/null +++ b/playground/between.html @@ -0,0 +1,110 @@ + + + + + + between — tsb playground + + + + ← Back to playground index +

between

+

+ Element-wise range check: returns a boolean Series indicating whether each value lies + within [left, right]. Mirrors pandas.Series.between. +

+ +

seriesBetween — inclusive="both" (default)

+

Python pandas equivalent:

+
import pandas as pd
+
+s = pd.Series([1, 2, 3, 4, 5])
+print(s.between(2, 4))
+# 0    False
+# 1     True
+# 2     True
+# 3     True
+# 4    False
+# dtype: bool
+
+

tsb equivalent:

+
import { Series, seriesBetween } from "tsb";
+
+const s = new Series({ data: [1, 2, 3, 4, 5] });
+seriesBetween(s, 2, 4).values;
+// [false, true, true, true, false]
+
+ +

Inclusive options

+

Python pandas equivalent:

+
import pandas as pd
+
+s = pd.Series([1, 2, 3, 4, 5])
+
+s.between(2, 4, inclusive="left").tolist()
+# [False, True, True, False, False]
+
+s.between(2, 4, inclusive="right").tolist()
+# [False, False, True, True, False]
+
+s.between(2, 4, inclusive="neither").tolist()
+# [False, False, True, False, False]
+
+

tsb equivalent:

+
import { Series, seriesBetween } from "tsb";
+
+const s = new Series({ data: [1, 2, 3, 4, 5] });
+
+seriesBetween(s, 2, 4, { inclusive: "left" }).values;
+// [false, true, true, false, false]
+
+seriesBetween(s, 2, 4, { inclusive: "right" }).values;
+// [false, false, true, true, false]
+
+seriesBetween(s, 2, 4, { inclusive: "neither" }).values;
+// [false, false, true, false, false]
+
+ +

Missing values

+

Python pandas equivalent:

+
import pandas as pd
+import numpy as np
+
+s = pd.Series([1, None, np.nan, 4])
+s.between(0, 5).tolist()
+# [True, False, False, True]
+
+

tsb equivalent:

+
import { Series, seriesBetween } from "tsb";
+
+const s = new Series({ data: [1, null, NaN, 4] });
+seriesBetween(s, 0, 5).values;
+// [true, false, false, true]
+
+ +

String comparison

+

Python pandas equivalent:

+
import pandas as pd
+
+s = pd.Series(["apple", "banana", "cherry", "date"])
+s.between("banana", "cherry").tolist()
+# [False, True, True, False]
+
+

tsb equivalent:

+
import { Series, seriesBetween } from "tsb";
+
+const s = new Series({ data: ["apple", "banana", "cherry", "date"] });
+seriesBetween(s, "banana", "cherry").values;
+// [false, true, true, false]
+
+ + diff --git a/playground/combine.html b/playground/combine.html new file mode 100644 index 00000000..7ef22b7c --- /dev/null +++ b/playground/combine.html @@ -0,0 +1,109 @@ + + + + + + combine — Element-wise Combination — tsb playground + + + +

combine — Element-wise Combination

+

+ combineSeries(a, b, func) and combineDataFrame(a, b, func) + combine two objects element-wise using a caller-supplied binary function. + The result index is the union of both indices; a + fillValue (default null) is used when only one + side has a value for a given label. +

+ +

Interactive Demo

+ + + + +
Click a button above to run an example.
+ +

Code Examples

+
import { Series, DataFrame, combineSeries, combineDataFrame } from "tsb";
+
+// ── Series ──────────────────────────────────────────────────────────────────
+const a = new Series({ data: [1, 5, 3], index: [0, 1, 2] });
+const b = new Series({ data: [10, 2, 30], index: [0, 1, 2] });
+
+// Element-wise max
+combineSeries(a, b, (x, y) => Math.max(x, y)).values; // [10, 5, 30]
+
+// Union index with fillValue=0
+const c = new Series({ data: [1, 2], index: ["x", "y"] });
+const d = new Series({ data: [10, 30], index: ["x", "z"] });
+combineSeries(c, d, (x, y) => (x ?? 0) + (y ?? 0), 0).values;
+// x:11, y:2, z:30
+
+// ── DataFrame ───────────────────────────────────────────────────────────────
+const df1 = DataFrame.fromColumns({ a: [1, 5], b: [100, 200] });
+const df2 = DataFrame.fromColumns({ a: [10, 2], c: [1000, 2000] });
+
+// Shared column "a": element-wise min; unshared columns processed with fillValue
+combineDataFrame(df1, df2, (p, q) => Math.min(p ?? Infinity, q ?? Infinity));
+
+// overwrite: false — unshared columns preserved as-is
+combineDataFrame(df1, df2, (p, q) => Math.min(p ?? Infinity, q ?? Infinity),
+  { overwrite: false });
+
+ + + + diff --git a/playground/corrwith.html b/playground/corrwith.html new file mode 100644 index 00000000..27915b1c --- /dev/null +++ b/playground/corrwith.html @@ -0,0 +1,152 @@ + + + + + + corrwith / autoCorr — tsb playground + + + + ← Back to playground index +

corrWith / autoCorr — Pairwise Correlation & Autocorrelation

+

+ Compute pairwise Pearson correlations between a DataFrame and a Series or + another DataFrame, and compute the lag-N autocorrelation of a numeric Series. + Mirrors pandas.DataFrame.corrwith() and + pandas.Series.autocorr(). +

+ +

autoCorr — lag-N autocorrelation

+

Python pandas equivalent:

+
import pandas as pd
+
+s = pd.Series([1, 2, 3, 4, 5])
+print(s.autocorr(lag=1))   # 1.0  (perfectly linear → perfect self-correlation)
+print(s.autocorr(lag=2))   # 1.0
+
+# Alternating sign series → -1 autocorrelation at lag 1
+s2 = pd.Series([1, -1, 1, -1, 1, -1])
+print(s2.autocorr(lag=1))  # -1.0
+
+# NaN when constant (zero variance)
+s3 = pd.Series([5, 5, 5, 5])
+print(s3.autocorr(lag=1))  # NaN
+
+

tsb equivalent:

+
import { Series, autoCorr } from "tsb";
+
+const s = new Series({ data: [1, 2, 3, 4, 5] });
+autoCorr(s);          // lag=1 → 1.0
+autoCorr(s, 0);       // lag=0 → 1.0  (series correlated with itself; NaN for a constant series)
+autoCorr(s, 2);       // lag=2 → 1.0
+
+const alt = new Series({ data: [1, -1, 1, -1, 1, -1] });
+autoCorr(alt, 1);     // -1.0
+
+const flat = new Series({ data: [5, 5, 5, 5] });
+autoCorr(flat, 1);    // NaN
+
+ +

corrWith — DataFrame correlated with a Series

+

Python pandas equivalent:

+
import pandas as pd
+
+df = pd.DataFrame({
+    "A": [1, 2, 3, 4, 5],
+    "B": [5, 4, 3, 2, 1],
+})
+s = pd.Series([1, 2, 3, 4, 5])
+
+print(df.corrwith(s))
+# A    1.0
+# B   -1.0
+
+

tsb equivalent:

+
import { DataFrame, Series, corrWith } from "tsb";
+
+const df = DataFrame.fromColumns({
+  A: [1, 2, 3, 4, 5],
+  B: [5, 4, 3, 2, 1],
+});
+const s = new Series({ data: [1, 2, 3, 4, 5] });
+
+const result = corrWith(df, s);
+// Series(A=1.0, B=-1.0) indexed by column names
+
+ +

corrWith — DataFrame correlated with another DataFrame

+

Python pandas equivalent:

+
import pandas as pd
+import numpy as np
+
+df1 = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
+df2 = pd.DataFrame({"A": [1, 2, 3], "B": [6, 5, 4]})
+
+print(df1.corrwith(df2))
+# A    1.0
+# B   -1.0
+
+# Columns in only one DataFrame get NaN (drop=False default)
+df3 = pd.DataFrame({"A": [1, 2, 3], "C": [7, 8, 9]})
+print(df1.corrwith(df3))
+# A    1.0
+# B    NaN
+# C    NaN
+
+# drop=True keeps only common columns
+print(df1.corrwith(df3, drop=True))
+# A    1.0
+
+

tsb equivalent:

+
import { DataFrame, corrWith } from "tsb";
+
+const df1 = DataFrame.fromColumns({ A: [1, 2, 3], B: [4, 5, 6] });
+const df2 = DataFrame.fromColumns({ A: [1, 2, 3], B: [6, 5, 4] });
+
+corrWith(df1, df2);                         // A=1.0, B=-1.0
+
+const df3 = DataFrame.fromColumns({ A: [1, 2, 3], C: [7, 8, 9] });
+corrWith(df1, df3);                         // A=1.0, B=NaN, C=NaN
+corrWith(df1, df3, { drop: true });         // A=1.0
+
+ +

corrWith — axis=1 (row-wise correlation)

+

Python pandas equivalent:

+
import pandas as pd
+
+df = pd.DataFrame({
+    "A": [1, 2],
+    "B": [2, 4],
+    "C": [3, 6],
+})
+s = pd.Series([1, 2, 3])
+
+# axis=1: correlate each ROW with the Series
+print(df.corrwith(s, axis=1))
+# 0    1.0
+# 1    1.0
+
+

tsb equivalent:

+
import { DataFrame, Series, corrWith } from "tsb";
+
+const df = DataFrame.fromColumns({ A: [1, 2], B: [2, 4], C: [3, 6] });
+const s = new Series({ data: [1, 2, 3] });
+
+corrWith(df, s, { axis: 1 });
+// Series([1.0, 1.0]) indexed by row labels [0, 1]
+
+ + + + diff --git a/playground/cut_bins_to_frame.html b/playground/cut_bins_to_frame.html new file mode 100644 index 00000000..43815fcd --- /dev/null +++ b/playground/cut_bins_to_frame.html @@ -0,0 +1,92 @@ + + + + + + cutBinsToFrame — tsb playground + + + +

cutBinsToFrame

+

+ cutBinsToFrame(result, { data }) converts the output of + cut() or qcut() into a summary DataFrame with + one row per bin, showing the bin label, edges, count, and frequency. +

+ +

Interactive Demo

+

+ + + +

+
Click "Run" to see the result.
+ +

What it does

+
import { cut, cutBinsToFrame, cutBinCounts, binEdges } from "tsb";
+
+// Bin 20 random values into 4 equal-width bins
+const data = Array.from({ length: 20 }, () => Math.random() * 100);
+const result = cut(data, 4);
+
+// Summary DataFrame: bin | left | right | count | frequency
+const df = cutBinsToFrame(result, { data });
+
+// Just the count dictionary
+const counts = cutBinCounts(result);
+// { "(0.0, 25.0]": 5, "(25.0, 50.0]": 6, ... }
+
+// Just edges indexed by label
+const edges = binEdges(result);
+
+ +

Related Functions

+ + + + + diff --git a/playground/dot_matmul.html b/playground/dot_matmul.html new file mode 100644 index 00000000..5054f8bb --- /dev/null +++ b/playground/dot_matmul.html @@ -0,0 +1,121 @@ + + + + + + dot_matmul — dot product & matrix multiply — tsb playground + + + +

dot_matmul — dot product & matrix multiplication

+

+ Dot product and matrix multiplication for Series and DataFrame. + Mirrors pandas.Series.dot() and pandas.DataFrame.dot(). + Index alignment is performed automatically (inner join on shared labels). +

+ +

API

+
+import { seriesDotSeries, seriesDotDataFrame, dataFrameDotSeries, dataFrameDotDataFrame } from "tsb";
+
+// Series · Series → scalar
+seriesDotSeries(a, b);
+
+// Series · DataFrame → Series
+seriesDotDataFrame(s, df);
+
+// DataFrame · Series → Series
+dataFrameDotSeries(df, s);
+
+// DataFrame · DataFrame → DataFrame
+dataFrameDotDataFrame(A, B);
+  
+ +

Interactive Demo

+ + + + +
Click a button above to run an example.
+ +

Examples

+
+// Series dot product
+const a = new Series({ data: [1, 2, 3], index: ["x","y","z"] });
+const b = new Series({ data: [4, 5, 6], index: ["x","y","z"] });
+seriesDotSeries(a, b); // 1*4 + 2*5 + 3*6 = 32
+
+// DataFrame · vector
+const df = DataFrame.fromColumns({ a: [1, 2], b: [3, 4] });
+const v  = new Series({ data: [1, 1], index: ["a", "b"] });
+dataFrameDotSeries(df, v).values; // [4, 6]  (row sums)
+
+// Matrix multiply
+const A = DataFrame.fromColumns({ k: [1, 2] });     // 2×1
+// ... B with row index ["k"] ...
+dataFrameDotDataFrame(A, B).col("r").values;         // [3, 6]
+  
+ + + + diff --git a/playground/eval_query.html b/playground/eval_query.html new file mode 100644 index 00000000..e4b31946 --- /dev/null +++ b/playground/eval_query.html @@ -0,0 +1,155 @@ + + + + + + tsb — DataFrame.query() and DataFrame.eval() + + + +

← tsb playground

+

DataFrame.query() and DataFrame.eval()

+

+ queryDataFrame and evalDataFrame let you filter rows or evaluate + expressions using a Python-pandas-style expression string. This mirrors + pandas.DataFrame.query() + and + pandas.DataFrame.eval(). +

+ +

Import

+
import { queryDataFrame, evalDataFrame, DataFrame } from "tsb";
+ +

queryDataFrame(df, expr)

+

Returns a new DataFrame containing only the rows where expr evaluates to truthy.

+
const df = DataFrame.fromArrays({
+  name:   ["Alice", "Bob", "Carol", "Dave"],
+  age:    [25, 32, 28, 45],
+  score:  [88, 72, 95, 60],
+  active: [true, false, true, true],
+});
+
+// Simple comparison
+queryDataFrame(df, "age > 28");
+// name: ["Bob", "Dave"]  age: [32, 45]  score: [72, 60]
+
+// Combined conditions
+queryDataFrame(df, "age < 35 and score >= 85");
+// name: ["Alice", "Carol"]
+
+// String equality
+queryDataFrame(df, "name == 'Alice'");
+// single row
+
+// 'in' operator
+queryDataFrame(df, "name in ['Alice', 'Carol']");
+
+// 'not in' operator
+queryDataFrame(df, "age not in [25, 45]");
+
+// Backtick-quoted column (for names with spaces)
+const df2 = DataFrame.fromArrays({ "first name": ["Alice", "Bob"] });
+queryDataFrame(df2, "`first name` == 'Alice'");
+ +

evalDataFrame(df, expr)

+

Evaluates an arithmetic or logical expression and returns a new Series.

+
const sales = DataFrame.fromArrays({
+  price: [10.0, 25.0, 5.0, 40.0],
+  qty:   [100,   50, 200,   10],
+});
+
+// Arithmetic expression → new Series
+evalDataFrame(sales, "price * qty");
+// Series [1000, 1250, 1000, 400]
+
+// Boolean expression (useful as a mask)
+evalDataFrame(sales, "price > 10");
+// Series [false, true, false, true]
+
+// Function calls
+evalDataFrame(sales, "round(price * qty / 100, 1)");
+// Series [10.0, 12.5, 10.0, 4.0]
+
+// String operations
+const df3 = DataFrame.fromArrays({ tag: ["Foo", "Bar", "Baz"] });
+evalDataFrame(df3, "lower(tag)");
+// Series ["foo", "bar", "baz"]
+ +

Supported Expression Syntax

+ +

Column references

+ + + + +
SyntaxExample
Bare identifierage
Backtick-quoted (spaces allowed)`first name`
+ +

Literals

+ + + + + + +
TypeExamples
Number42, 3.14, 1e6
String"hello", 'world'
BooleanTrue, False, true, false
NullNone, null, NaN
+ +

Operators

+ + + + + + +
CategoryOperators
Arithmetic+ - * / % **
Comparison== != < <= > >=
Logicaland or not
Membershipin [...], not in [...]
+ +

Built-in functions

+ + + + + + + + + + +
FunctionDescription
abs(x)Absolute value
round(x, d?)Round to d decimal places (default 0)
floor(x), ceil(x)Floor / ceiling
sqrt(x), log(x), log2(x), log10(x)Math functions
str(x), len(x)Convert to string / string length
lower(x), upper(x)String case conversion
isnull(x) / isna(x)True if null / NaN
notnull(x) / notna(x)True if not null
+ +

Pandas API comparison

+ + + + + + + +
pandastsb
df.query("col > 5")queryDataFrame(df, "col > 5")
df.eval("a + b")evalDataFrame(df, "a + b")
df.query("col in [1,2,3]")queryDataFrame(df, "col in [1, 2, 3]")
df.query("`col name` == 'x'")queryDataFrame(df, "`col name` == 'x'")
df.eval("func(col)")evalDataFrame(df, "abs(col)") (built-in functions)
+ +
+ Note: Unlike pandas, external variable substitution (@var) is not supported. + Use template literals to embed values: queryDataFrame(df, `age > ${minAge}`). +
+ +

Tips

+ + + diff --git a/playground/filter.html b/playground/filter.html new file mode 100644 index 00000000..d29bea35 --- /dev/null +++ b/playground/filter.html @@ -0,0 +1,109 @@ + + + + + + filter — tsb playground + + + + ← Back to playground index +

filter

+

+ Filter a DataFrame's rows or columns by label using exact names, substring matching, + or regular expressions. Mirrors pandas.DataFrame.filter. +

+ +

filterDataFrame — by items (column names)

+

Python pandas equivalent:

+
import pandas as pd
+
+df = pd.DataFrame({"one": [1, 2], "two": [3, 4], "three": [5, 6]})
+print(df.filter(items=["one", "three"]).columns.tolist())
+# ['one', 'three']
+
+

tsb equivalent:

+
import { DataFrame, filterDataFrame } from "tsb";
+
+const df = DataFrame.fromColumns({ one: [1, 2], two: [3, 4], three: [5, 6] });
+filterDataFrame(df, { items: ["one", "three"] }).columns.values;
+// ["one", "three"]
+
+ +

filterDataFrame — by like (substring)

+

Python pandas equivalent:

+
import pandas as pd
+
+df = pd.DataFrame({"apple": [1], "apricot": [2], "banana": [3]})
+print(df.filter(like="ap").columns.tolist())
+# ['apple', 'apricot']
+
+

tsb equivalent:

+
import { DataFrame, filterDataFrame } from "tsb";
+
+const df = DataFrame.fromColumns({ apple: [1], apricot: [2], banana: [3] });
+filterDataFrame(df, { like: "ap" }).columns.values;
+// ["apple", "apricot"]
+
+ +

filterDataFrame — by regex

+

Python pandas equivalent:

+
import pandas as pd
+
+df = pd.DataFrame({"a1": [1], "a2": [2], "b1": [3], "b2": [4]})
+print(df.filter(regex="^a").columns.tolist())
+# ['a1', 'a2']
+
+

tsb equivalent:

+
import { DataFrame, filterDataFrame } from "tsb";
+
+const df = DataFrame.fromColumns({ a1: [1], a2: [2], b1: [3], b2: [4] });
+filterDataFrame(df, { regex: "^a" }).columns.values;
+// ["a1", "a2"]
+
+ +

filterDataFrame — filter rows (axis=0)

+

Python pandas equivalent:

+
import pandas as pd
+
+df = pd.DataFrame(
+    {"x": [10, 20, 30]},
+    index=["foo", "bar", "baz"],
+)
+print(df.filter(like="ba", axis=0).index.tolist())
+# ['bar', 'baz']
+
+

tsb equivalent:

+
import { DataFrame, filterDataFrame } from "tsb";
+
+const df = DataFrame.fromColumns({ x: [10, 20, 30] }, { index: ["foo", "bar", "baz"] });
+filterDataFrame(df, { like: "ba", axis: 0 }).index.values;
+// ["bar", "baz"]
+
+ +

filterSeries — by label

+

Python pandas equivalent:

+
import pandas as pd
+
+s = pd.Series([1, 2, 3, 4], index=["alpha", "beta", "gamma", "aleph"])
+print(s.filter(like="al").index.tolist())
+# ['alpha', 'aleph']
+
+

tsb equivalent:

+
import { Series, filterSeries } from "tsb";
+
+const s = new Series({ data: [1, 2, 3, 4], index: ["alpha", "beta", "gamma", "aleph"] });
+filterSeries(s, { like: "al" }).index.values;
+// ["alpha", "aleph"]
+
+ + diff --git a/playground/index.html b/playground/index.html index 752db9ca..0597f9f2 100644 --- a/playground/index.html +++ b/playground/index.html @@ -289,6 +289,11 @@

✅ Complete +
+

🔎 query / eval

+

Filter rows or evaluate expressions using a pandas-style expression string. queryDataFrame(df, "col > 5 and label in ['a', 'b']") and evalDataFrame(df, "price * qty"). Supports arithmetic, comparisons, logical operators, membership tests, backtick-quoted column names, and built-in functions (abs, round, isnull, lower, …). Mirrors pandas.DataFrame.query and pandas.DataFrame.eval.

+
✅ Complete
+

🔍 isna / notna

Module-level missing-value detection: isna, notna, isnull, notnull work on scalars, arrays, Series, and DataFrames. Plus standalone fillna, dropna, countna, and countValid. Mirrors pandas.isna, pandas.notna, pandas.isnull, pandas.notnull.

@@ -359,10 +364,92 @@

✅ Complete

+
+

⏳ timedelta_range

+

Generate fixed-frequency TimedeltaIndex sequences. Supports start/end/periods/freq combinations, multiplier prefixes (e.g. "2H", "30min"), linear spacing, and closed endpoint control.

+
✅ Complete
+
+
+

🔍 strFindall & toJsonDenormalize

+

strFindall/strFindallCount/strFindFirst/strFindallExpand — regex match extraction per element (mirrors pandas str.findall). toJsonDenormalize/toJsonRecords/toJsonSplit/toJsonIndex — serialize DataFrames to nested or flat JSON.

+
✅ Complete
+
+
+

📊 cutBinsToFrame

+

Convert cut/qcut BinResult into a tidy summary DataFrame. cutBinsToFrame returns bin labels, edges, counts, and frequencies. cutBinCounts returns a label→count dict. binEdges returns an edges-only DataFrame.

+
✅ Complete
+
+
+

✂️ xs — Cross-Section

+

xsDataFrame / xsSeries — select rows or columns by label (mirrors pandas .xs()). Supports flat and MultiIndex, axis selection, level targeting, and dropLevel control.

+
✅ Complete
+
+
+

↔️ between — Range Check

+

seriesBetween — element-wise range check returning a boolean Series. Mirrors pandas Series.between(). Supports inclusive="both"|"left"|"right"|"neither".

+
✅ Complete
+
+
+

🔄 update — In-place Update

+

seriesUpdate / dataFrameUpdate — update values from another object using label alignment. Non-NA values in other overwrite self. Mirrors pandas DataFrame.update().

+
✅ Complete
+
+
+

🔽 filter — Filter Labels

+

filterDataFrame / filterSeries — filter rows or columns by label using items list, substring (like), or regex pattern. Mirrors pandas DataFrame.filter().

+
✅ Complete
+
+
+

🔀 combine — Element-wise Combination

+

combineSeries / combineDataFrame — combine two objects element-wise with a caller-supplied binary function. Result index is the union of both indices. Mirrors pandas Series.combine() / DataFrame.combine().

+
✅ Complete
+
+
+

✅ keepTrue / keepFalse / filterBy — Boolean Indexing

+

keepTrue / keepFalse / filterBy — boolean-mask selection helpers for Series and DataFrames. Mirrors pandas boolean indexing (series[mask], df[mask]).

+
✅ Complete
+
+
+

🔢 scalar_extract — squeeze / item / bool / first_valid_index

+

squeezeSeries / squeezeDataFrame / itemSeries / boolSeries / boolDataFrame / firstValidIndex / lastValidIndex — scalar-extraction helpers for Series and DataFrames. Mirrors pandas Series.squeeze(), item(), bool(), first_valid_index(), last_valid_index().

+
✅ Complete
+
+
+

📊 corrWith / autoCorr — Pairwise Correlation & Autocorrelation

+

corrWith / autoCorr — compute pairwise Pearson correlations between a DataFrame and a Series or DataFrame, and compute lag-N autocorrelation for a Series. Mirrors pandas DataFrame.corrwith() and Series.autocorr().

+
✅ Complete
+
+
+

🔗 join / joinAll / crossJoin — Label-Based Joins

+

join / joinAll / crossJoin — join DataFrames by index labels or a key column. join() defaults to left-join-on-index, joinAll() chains multiple joins, crossJoin() produces the Cartesian product. Mirrors pandas DataFrame.join().

+
✅ Complete
+
+
+

⏱️ merge_asof — Ordered Nearest-Key Join

+

mergeAsof — ordered left-join on the nearest key (backward/forward/nearest). Ideal for time-series: match trades to most recent quotes. Supports by-group matching, tolerance, allow_exact_matches, and custom suffixes. Mirrors pandas.merge_asof().

+
✅ Complete
+
+
+

📋 merge_ordered — Ordered Fill Merge

+

mergeOrdered — ordered outer/inner/left/right merge sorted by key column(s). Supports fill_method: "ffill" to forward-fill null gaps, left_by/right_by for group-wise ordered merging, left_on/right_on for different key names, and suffix handling. Mirrors pandas.merge_ordered().

+
✅ Complete
+
+
+

📅 resample — Time-Based Resampling

+

resampleSeries / resampleDataFrame — time-based groupby aggregation. Supports S/T/H/D/W/MS/ME/QS/QE/YS/YE frequencies, aggregations (sum, mean, min, max, count, first, last, std, var, size, ohlc), per-column agg specs, and automatic empty-bin filling. Mirrors pandas.DataFrame.resample().

+
✅ Complete
+
+
+

🔍 infer_objects / convert_dtypes — Dtype Inference

+

inferObjectsSeries / inferObjectsDataFrame / convertDtypesSeries / convertDtypesDataFrame — promote object-typed Series to better dtypes and parse string columns as numbers. Mirrors pandas infer_objects() and convert_dtypes().

+
✅ Complete
+
+
+

🧪 testing — Assertion Utilities

+

assertSeriesEqual / assertFrameEqual / assertIndexEqual — rich assertion helpers for use in test suites. Numeric tolerance, checkLike column-order mode, dtype checks, AssertionError with detailed diff messages. Mirrors pandas.testing.

+
✅ Complete
+
- -
-

Performance

⚡ Benchmarks

diff --git a/playground/infer_objects.html b/playground/infer_objects.html new file mode 100644 index 00000000..0dc49004 --- /dev/null +++ b/playground/infer_objects.html @@ -0,0 +1,152 @@ + + + + + + tsb — infer_objects / convert_dtypes + + + + ← Back to tsb playground +

infer_objects / convert_dtypes

+ +
+ pandas equivalent: + Series.infer_objects()  /  + DataFrame.infer_objects()  /  + Series.convert_dtypes()  /  + DataFrame.convert_dtypes() +
+ +

What it does

+

+ These utilities refine dtypes automatically — useful after reading data from + CSV/JSON where everything starts as object or string: +

+
+
+  • inferObjectsSeries — promotes an object-typed Series to a more specific dtype (int, float, bool, string) when all values have a consistent type.
+  • inferObjectsDataFrame — applies per-column inference to every column.
+  • convertDtypesSeries — like inferObjectsSeries but also parses string columns as numbers when possible.
+  • convertDtypesDataFrame — per-column convertDtypesSeries.
+ +

inferObjectsSeries — promote object → typed

+
import { Series, Dtype, inferObjectsSeries } from "tsb";
+
+// Object series holding integers
+const s = new Series({ data: [1, 2, 3], dtype: Dtype.object });
+s.dtype.kind;  // "object"
+
+const better = inferObjectsSeries(s);
+better.dtype.kind; // "int"
+better.values;     // [1, 2, 3]
+
+// Mixed types — cannot infer, returns original
+const mixed = new Series({ data: [1, "a", true], dtype: Dtype.object });
+inferObjectsSeries(mixed).dtype.kind; // "object"
+
+// All null — no inference possible
+const nulls = new Series({ data: [null, null], dtype: Dtype.object });
+inferObjectsSeries(nulls).dtype.kind; // "object"
+ +

inferObjectsDataFrame — all columns at once

+
import { DataFrame, inferObjectsDataFrame } from "tsb";
+
+const df = DataFrame.fromColumns({
+  ints:   [1, 2, 3],
+  floats: [1.1, 2.2, 3.3],
+  strs:   ["a", "b", "c"],
+  bools:  [true, false, true],
+});
+
+const inferred = inferObjectsDataFrame(df);
+inferred.col("ints").dtype.kind;   // "int"
+inferred.col("floats").dtype.kind; // "float"
+inferred.col("strs").dtype.kind;   // "string"
+inferred.col("bools").dtype.kind;  // "bool"
+ +

convertDtypesSeries — also parses numeric strings

+
import { Series, convertDtypesSeries } from "tsb";
+
+// String values that look like integers
+const ints = new Series({ data: ["1", "2", "3"] });
+const result = convertDtypesSeries(ints);
+result.dtype.kind; // "int"
+result.values;     // [1, 2, 3]
+
+// String values that look like floats
+const floats = new Series({ data: ["1.5", "2.5", "3.5"] });
+convertDtypesSeries(floats).dtype.kind; // "float"
+
+// Non-numeric strings: unchanged
+const text = new Series({ data: ["apple", "banana"] });
+convertDtypesSeries(text);  // same Series, dtype "string"
+
+// Int series with nulls → can convert to float for NA safety
+import { Dtype } from "tsb";
+const withNull = new Series({ data: [1, null, 3], dtype: Dtype.int64 });
+convertDtypesSeries(withNull, { convertIntegerToFloat: true }).dtype.kind;
+// "float"  (null becomes NaN-compatible)
+ +

convertDtypesDataFrame — per-column conversion

+
import { DataFrame, convertDtypesDataFrame } from "tsb";
+
+// After reading a CSV, all columns come back as strings:
+const raw = DataFrame.fromColumns({
+  age:   ["25", "30", "22"],
+  score: ["88.5", "92.1", "78.0"],
+  name:  ["Alice", "Bob", "Charlie"],
+});
+
+const typed = convertDtypesDataFrame(raw);
+typed.col("age").dtype.kind;   // "int"
+typed.col("score").dtype.kind; // "float"
+typed.col("name").dtype.kind;  // "string" (unchanged — not numeric)
+ +

API reference

+ + + + + + +
FunctionDescription
inferObjectsSeries(s, options?)Infer better dtype for object-typed Series
inferObjectsDataFrame(df, options?)Infer better dtypes for all columns
convertDtypesSeries(s, options?)Convert to best dtype, including string→number parsing
convertDtypesDataFrame(df, options?)Per-column convertDtypesSeries
+ +

InferObjectsOptions

+ + + +
OptionTypeDefaultDescription
objectOnlybooleantrueOnly infer for object-dtype Series (mirrors pandas default)
+ +

ConvertDtypesOptions

+ + + + +
OptionTypeDefaultDescription
convertStringbooleantrueParse string values as numbers when possible
convertIntegerToFloatbooleanfalseConvert int series with nulls to float
+ +

When to use which

+ + + + + +
Use caseFunction
Promote object columns after creationinferObjectsSeries / DataFrame
Parse CSV/JSON string columns to numbersconvertDtypesSeries / DataFrame
Make int columns nullable (float)convertDtypesSeries(s, { convertIntegerToFloat: true })
+ + + + diff --git a/playground/join.html b/playground/join.html new file mode 100644 index 00000000..8a4b66be --- /dev/null +++ b/playground/join.html @@ -0,0 +1,142 @@ + + + + + + tsb — join: label-based DataFrame join + + + + ← Back to tsb playground +

join — label-based DataFrame join

+ +
+ pandas equivalent: DataFrame.join(other, on=None, how='left', lsuffix='', rsuffix='', sort=False) +
+ +

What it does

+

+ join(left, right, options?) aligns two DataFrames by their index labels (or a key column). + Unlike the general-purpose merge(), join() defaults to a left join on index + — the idiomatic way to combine DataFrames that already share an index. +

+ +

Left join (default)

+
import { DataFrame, join } from "tsb";
+
+const employees = DataFrame.fromColumns(
+  { dept: ["Engineering", "Marketing", "Engineering"] },
+  { index: ["alice", "bob", "charlie"] },
+);
+
+const salaries = DataFrame.fromColumns(
+  { salary: [90_000, 75_000] },
+  { index: ["alice", "charlie"] },
+);
+
+join(employees, salaries);
+// dept          salary
+// alice    Engineering  90000
+// bob      Marketing    null    ← no salary for bob
+// charlie  Engineering  75000
+ +

Inner / outer / right join

+
join(employees, salaries, { how: "inner" });
+// Only alice and charlie (keys in BOTH DataFrames)
+
+join(employees, salaries, { how: "outer" });
+// All keys from either DataFrame (nulls where absent)
+
+join(employees, salaries, { how: "right" });
+// All keys from salaries: alice and charlie
+ +

Overlapping columns — use lsuffix / rsuffix

+
const a = DataFrame.fromColumns({ score: [10, 20] }, { index: ["x", "y"] });
+const b = DataFrame.fromColumns({ score: [15, 25] }, { index: ["x", "y"] });
+
+// This would throw — 'score' exists in both without suffix disambiguation:
+// join(a, b);
+
+join(a, b, { lsuffix: "_a", rsuffix: "_b" });
+// score_a  score_b
+// x  10    15
+// y  20    25
+ +

Join on a column key

+
const orders = DataFrame.fromColumns({
+  customerId: ["C1", "C2", "C1"],
+  amount:     [100, 200, 150],
+});
+const customers = DataFrame.fromColumns(
+  { name: ["Alice", "Bob"] },
+  { index: ["C1", "C2"] },
+);
+
+// Join orders.customerId against customers index
+join(orders, customers, { on: "customerId", how: "left" });
+// customerId  amount  name
+// C1          100     Alice
+// C2          200     Bob
+// C1          150     Alice
+ +

joinAll — chain multiple joins

+
import { joinAll } from "tsb";
+
+const base = DataFrame.fromColumns({ A: [1,2,3] }, { index: ["K0","K1","K2"] });
+const b1   = DataFrame.fromColumns({ B: [10,20,30] }, { index: ["K0","K1","K2"] });
+const b2   = DataFrame.fromColumns({ C: [100,200,300] }, { index: ["K0","K1","K2"] });
+
+joinAll(base, [b1, b2]);
+// A  B   C
+// 1  10  100
+// 2  20  200
+// 3  30  300
+ +

crossJoin — Cartesian product

+
import { crossJoin } from "tsb";
+
+const colors = DataFrame.fromColumns({ color: ["red", "blue"] });
+const sizes  = DataFrame.fromColumns({ size:  ["S", "M", "L"] });
+
+crossJoin(colors, sizes);
+// color  size
+// red    S
+// red    M
+// red    L
+// blue   S
+// blue   M
+// blue   L
+ +

API reference

+ + + + + +
FunctionDescription
join(left, right, options?)Label-based join (default: left join on index)
joinAll(left, others[], options?)Chain joins left-to-right
crossJoin(left, right, options?)Cartesian product of two DataFrames
+ +

JoinOptions

+ + + + + + + +
OptionTypeDefaultDescription
how"left" | "right" | "inner" | "outer""left"Join type
onstringindexLeft column to use as join key
lsuffixstring""Suffix for overlapping left columns
rsuffixstring""Suffix for overlapping right columns
sortbooleanfalseSort result by join keys
+ + + + diff --git a/playground/math_ops.html b/playground/math_ops.html new file mode 100644 index 00000000..0d138cda --- /dev/null +++ b/playground/math_ops.html @@ -0,0 +1,100 @@ + + + + + + math_ops — abs, round — tsb playground + + + +

math_ops — abs, round

+

+ Element-wise mathematical transformations for Series and DataFrame. + Mirrors pandas.Series.abs(), pandas.DataFrame.abs(), + pandas.Series.round(), and pandas.DataFrame.round(). + Missing values (null, NaN) are preserved as-is. +

+ +

Interactive Demo

+ + + + +
Click a button above to run an example.
+ +

Code Examples

+
import { Series, DataFrame, absSeries, absDataFrame, roundSeries, roundDataFrame } from "tsb";
+
+// ── absSeries ────────────────────────────────────────────────────────────────
+const s = new Series({ data: [-1, 2, -3, null] });
+absSeries(s).values;   // [1, 2, 3, null]
+
+// ── absDataFrame ─────────────────────────────────────────────────────────────
+const df = DataFrame.fromColumns({ a: [-1, 2], b: [3, -4] });
+absDataFrame(df).col("a").values;  // [1, 2]
+absDataFrame(df).col("b").values;  // [3, 4]
+
+// ── roundSeries ──────────────────────────────────────────────────────────────
+const prices = new Series({ data: [1.234, 5.678, null] });
+roundSeries(prices, 2).values;    // [1.23, 5.68, null]
+roundSeries(prices, 0).values;    // [1, 6, null]
+roundSeries(prices, -1).values;   // nearest 10: [0, 10, null]
+
+// ── roundDataFrame ────────────────────────────────────────────────────────────
+const data = DataFrame.fromColumns({ price: [1.111, 2.222], qty: [3.7, 4.4] });
+roundDataFrame(data, 2).col("price").values;        // [1.11, 2.22]
+roundDataFrame(data, { price: 1, qty: 0 }).col("qty").values; // [4, 4]
+
+ + + + diff --git a/playground/merge_asof.html b/playground/merge_asof.html new file mode 100644 index 00000000..c532a50e --- /dev/null +++ b/playground/merge_asof.html @@ -0,0 +1,126 @@ + + + + + + tsb — merge_asof (ordered nearest-key join) + + + + ← Back to tsb playground +

merge_asof — Ordered Nearest-Key Join

+ +
+ pandas equivalent: pd.merge_asof(left, right, on="time") +
+ +

+ mergeAsof is an ordered left-join that matches on the nearest key + rather than an exact key. It is especially useful for time-series data — e.g., matching + each trade to the most recent quote. +

+ +

Key concepts

+
    +
  • Both DataFrames must be sorted ascending by the key column before calling mergeAsof.
  • +
  • The result always has the same number of rows as the left DataFrame.
  • +
  • direction: controls whether to look backward (default), forward, or for the nearest key.
  • +
  • by: require additional columns to match exactly before doing the asof lookup (e.g. by ticker).
  • +
  • tolerance: ignore matches further than this numeric distance.
  • +
+ +

Basic example — backward (default)

+
import { DataFrame, mergeAsof } from "tsb";
+
+// Each trade is matched to the most recent quote (backward asof)
+const trades = DataFrame.fromColumns({
+  time:  [1,  5, 10],
+  price: [100, 200, 300],
+});
+const quotes = DataFrame.fromColumns({
+  time: [2,  6],
+  bid:  [98, 195],
+});
+
+const result = mergeAsof(trades, quotes, { on: "time" });
+// time | price | bid
+//    1 |   100 | null   ← no quote ≤ 1
+//    5 |   200 |   98   ← most recent quote ≤ 5 is at time=2
+//   10 |   300 |  195   ← most recent quote ≤ 10 is at time=6
+ +

Forward direction

+
// Match each event to the next scheduled announcement
+const events = DataFrame.fromColumns({ t: [1, 3, 7], v: [10, 30, 70] });
+const schedule = DataFrame.fromColumns({ t: [2, 6, 10], w: [20, 60, 100] });
+
+const result = mergeAsof(events, schedule, {
+  on: "t",
+  direction: "forward",
+});
+// t=1 → t=2 (w=20), t=3 → t=6 (w=60), t=7 → t=10 (w=100)
+ +

Nearest direction

+
const result = mergeAsof(trades, quotes, {
+  on: "time",
+  direction: "nearest",
+});
+// Picks the quote with the smallest absolute time difference.
+ +

Grouping with by

+
// Match trades to quotes within the same ticker symbol
+const trades = DataFrame.fromColumns({
+  time:   [1,    2,      3,    4],
+  ticker: ["AAPL","MSFT","AAPL","MSFT"],
+  price:  [100,  200,    110,   210],
+});
+const quotes = DataFrame.fromColumns({
+  time:   [1,    1,      3,    3],
+  ticker: ["AAPL","MSFT","AAPL","MSFT"],
+  bid:    [99,   198,    109,   208],
+});
+
+mergeAsof(trades, quotes, { on: "time", by: "ticker" });
+ +

Tolerance

+
// Only match if the key distance is ≤ 2
+mergeAsof(left, right, { on: "t", tolerance: 2 });
+ +

Different key column names (left_on / right_on)

+
mergeAsof(left, right, {
+  left_on: "trade_time",
+  right_on: "quote_time",
+});
+ +

Using index as key

+
mergeAsof(left, right, {
+  left_index: true,
+  right_on: "timestamp",
+});
+ +

Options reference

+ + + + + + + + + + + +
OptionDefaultDescription
onShared key column name
left_on / right_onDifferent key columns per side
left_index / right_indexfalseUse index as key
byColumn(s) that must match exactly
left_by / right_byDifferent by-columns per side
direction"backward""backward", "forward", or "nearest"
tolerancenullMax numeric key distance for a match
allow_exact_matchestrueInclude exact key matches
suffixes["_x","_y"]Suffixes for overlapping column names
+ + diff --git a/playground/merge_ordered.html b/playground/merge_ordered.html new file mode 100644 index 00000000..661ce854 --- /dev/null +++ b/playground/merge_ordered.html @@ -0,0 +1,147 @@ + + + + + + tsb — merge_ordered (ordered fill merge) + + + + ← Back to tsb playground +

merge_ordered — Ordered Fill Merge

+ +
+ pandas equivalent: pd.merge_ordered(left, right, on="date") +
+ +

+ mergeOrdered is an ordered merge (default outer join) that + sorts the result by the key column(s). It is ideal for time-series and event data where + both DataFrames have partially overlapping key ranges and you want a complete timeline + with optional forward-fill (fill_method: "ffill") to carry values forward. +

+ +

Key concepts

+
    +
  • Default how: "outer" — keeps all keys from both DataFrames.
  • +
  • Result is always sorted ascending by the key column.
  • +
  • fill_method: "ffill" forward-fills null gaps in non-key columns after the merge.
  • +
  • left_by / right_by: perform the ordered merge independently per group and concatenate.
  • +
  • left_on / right_on: use different key column names per side.
  • +
+ +

Basic outer ordered merge

+
import { DataFrame, mergeOrdered } from "tsb";
+
+const left = DataFrame.fromColumns({
+  date:  [1, 3, 5],
+  price: [10, 30, 50],
+});
+const right = DataFrame.fromColumns({
+  date:   [2, 3, 6],
+  volume: [200, 300, 600],
+});
+
+const result = mergeOrdered(left, right, { on: "date" });
+// date | price | volume
+//    1 |    10 |   null
+//    2 |  null |    200
+//    3 |    30 |    300
+//    5 |    50 |   null
+//    6 |  null |    600
+ +

Forward-fill after merge

+
const result = mergeOrdered(left, right, {
+  on: "date",
+  fill_method: "ffill",
+});
+// date | price | volume
+//    1 |    10 |   null   ← no earlier price to fill
+//    2 |    10 |    200   ← price carried forward from date=1
+//    3 |    30 |    300
+//    5 |    50 |    300   ← volume carried forward from date=3
+//    6 |    50 |    600
+ +

Inner join variant

+
// Only rows where both DataFrames have a key
+mergeOrdered(left, right, { on: "date", how: "inner" });
+// date | price | volume
+//    3 |    30 |    300
+ +

Different key column names per side

+
const left2 = DataFrame.fromColumns({ t_left:  [1, 3, 5], a: [10, 30, 50] });
+const right2 = DataFrame.fromColumns({ t_right: [2, 3, 6], b: [200, 300, 600] });
+
+mergeOrdered(left2, right2, { left_on: "t_left", right_on: "t_right" });
+// t_left | a    | b
+//      1 |   10 | null
+//      2 | null |  200
+//      3 |   30 |  300
+//      5 |   50 | null
+//      6 | null |  600
+ +

Group-wise ordered merge (left_by / right_by)

+
// Perform the ordered merge independently for each group
+const left3 = DataFrame.fromColumns({
+  grp: ["A", "A", "B", "B"],
+  k:   [1,   3,   1,   3],
+  a:   [10,  30, 100, 300],
+});
+const right3 = DataFrame.fromColumns({
+  grp: ["A", "A", "B", "B"],
+  k:   [2,   3,   2,   3],
+  b:   [20,  30, 200, 300],
+});
+
+mergeOrdered(left3, right3, {
+  on: "k",
+  left_by: "grp",
+  right_by: "grp",
+});
+// grp | k | a    | b
+//   A | 1 |   10 | null
+//   A | 2 | null |   20
+//   A | 3 |   30 |   30
+//   B | 1 |  100 | null
+//   B | 2 | null |  200
+//   B | 3 |  300 |  300
+ +

Overlapping non-key columns — suffixes

+
const left4 = DataFrame.fromColumns({ k: [1, 2, 3], val: [10, 20, 30] });
+const right4 = DataFrame.fromColumns({ k: [2, 3, 4], val: [200, 300, 400] });
+
+mergeOrdered(left4, right4, { on: "k", suffixes: ["_L", "_R"] });
+// k | val_L | val_R
+// 1 |    10 |  null
+// 2 |    20 |   200
+// 3 |    30 |   300
+// 4 |  null |   400
+ +

API reference

+ + + + + + + + + + + + +
OptionTypeDefaultDescription
onstring | string[]Key column(s) present in both DataFrames
left_onstring | string[]Key column(s) in the left DataFrame
right_onstring | string[]Key column(s) in the right DataFrame
how"outer" | "inner" | "left" | "right""outer"Join type
fill_method"ffill" | nullnullForward-fill null gaps after merge
left_bystring | string[]Group columns in left DataFrame
right_bystring | string[]Group columns in right DataFrame
suffixes[string, string]["_x", "_y"]Suffixes for overlapping non-key columns
+ + diff --git a/playground/notna_boolean.html b/playground/notna_boolean.html new file mode 100644 index 00000000..1776fce0 --- /dev/null +++ b/playground/notna_boolean.html @@ -0,0 +1,104 @@ + + + + + + keepTrue / keepFalse / filterBy — Boolean Indexing — tsb playground + + + +

keepTrue / keepFalse / filterBy — Boolean Indexing

+

+ Boolean-mask selection helpers that mirror the pandas + series[mask] / df[mask] idiom. +

+
    +
  • keepTrue(series, mask) — keep elements where mask is truthy
  • +
  • keepFalse(series, mask) — keep elements where mask is falsy
  • +
  • filterBy(df, mask) — filter DataFrame rows by boolean mask
  • +
+ +

Interactive Demo

+ + + + +
Click a button above to run an example.
+ +

Code Examples

+
import { Series, DataFrame, keepTrue, keepFalse, filterBy } from "tsb";
+
+const s = new Series({ data: [10, 20, 30, 40], index: ["a", "b", "c", "d"] });
+
+// Keep elements where mask is true
+keepTrue(s, [true, false, true, false]).values;  // [10, 30]
+
+// Keep elements where mask is false (complement)
+keepFalse(s, [true, false, true, false]).values; // [20, 40]
+
+// Filter DataFrame rows
+const df = DataFrame.fromColumns(
+  { age: [25, 30, 35, 40], score: [88, 72, 95, 60] },
+  { index: ["alice", "bob", "carol", "dave"] },
+);
+const highScore = df.col("score").values.map((v) => (v as number) >= 80);
+filterBy(df, highScore).col("age").values; // [25, 35]
+
+// Use a Series as a mask
+const mask = new Series({ data: [true, null, true, false], index: ["a", "b", "c", "d"] });
+keepTrue(s, mask).values; // [10, 30]  (null treated as false)
+
+ + + + diff --git a/playground/rename_ops.html b/playground/rename_ops.html new file mode 100644 index 00000000..949447d9 --- /dev/null +++ b/playground/rename_ops.html @@ -0,0 +1,144 @@ + + + + + + rename_ops — Rename, Prefix/Suffix, set_axis, to_frame — tsb playground + + + +

rename_ops — Rename, add_prefix/suffix, set_axis, to_frame

+

+ Functions for renaming labels, adding prefix/suffix to column or index labels, + replacing an axis entirely (set_axis), and converting a Series to a + single-column DataFrame (to_frame). Mirrors the corresponding + pandas methods. +

+ +

Interactive Demo

+ + + + + + +
Click a button above to run an example.
+ +

Code Examples

+
import {
+  Series, DataFrame,
+  renameSeriesIndex, renameDataFrame,
+  addPrefixDataFrame, addSuffixDataFrame,
+  addPrefixSeries, addSuffixSeries,
+  setAxisSeries, setAxisDataFrame,
+  seriesToFrame,
+} from "tsb";
+
+// ── renameSeriesIndex ────────────────────────────────────────────────────────
+const s = new Series({ data: [1, 2, 3], index: ["a", "b", "c"] });
+
+// Record mapping — only matched labels are changed
+renameSeriesIndex(s, { a: "x", c: "z" }).index.values;  // ["x", "b", "z"]
+
+// Function mapper — called for every index label
+renameSeriesIndex(s, l => l.toUpperCase()).index.values; // ["A", "B", "C"]
+
+// ── renameDataFrame ──────────────────────────────────────────────────────────
+const df = DataFrame.fromColumns({ a: [1, 2], b: [3, 4] },
+                                  { index: ["r0", "r1"] });
+
+// Rename columns
+renameDataFrame(df, { columns: { a: "x", b: "y" } }).columns.values; // ["x","y"]
+
+// Rename row index
+renameDataFrame(df, { index: { r0: "row0", r1: "row1" } }).index.values;
+
+// ── add_prefix / add_suffix ──────────────────────────────────────────────────
+addPrefixDataFrame(df, "col_").columns.values;         // ["col_a","col_b"]
+addSuffixDataFrame(df, "_v1").columns.values;          // ["a_v1","b_v1"]
+
+addPrefixSeries(s, "idx_").index.values;               // ["idx_a","idx_b","idx_c"]
+addSuffixSeries(s, "_end").index.values;               // ["a_end","b_end","c_end"]
+
+// ── set_axis ─────────────────────────────────────────────────────────────────
+setAxisSeries(s, ["x", "y", "z"]).index.values;         // ["x","y","z"]
+setAxisDataFrame(df, ["col1","col2"], 1).columns.values; // ["col1","col2"]
+setAxisDataFrame(df, ["rowA","rowB"], 0).index.values;   // ["rowA","rowB"]
+
+// ── to_frame ─────────────────────────────────────────────────────────────────
+const score = new Series({ data: [90, 80, 70], name: "score" });
+seriesToFrame(score).columns.values;          // ["score"]
+seriesToFrame(score, "points").columns.values; // ["points"]
+
+ + + + diff --git a/playground/resample.html b/playground/resample.html new file mode 100644 index 00000000..aa3509b6 --- /dev/null +++ b/playground/resample.html @@ -0,0 +1,323 @@ + + + + + + tsb — resample() + + + +
+

tsb — resample()

+

Time-based resampling and aggregation for Series and DataFrame · mirrors pandas.DataFrame.resample

+
+ +
+ + +
+

Overview

+

+ resample groups a time-indexed Series or DataFrame into fixed-size time bins + (seconds, minutes, hours, days, weeks, months, quarters, or years) and applies an aggregation + function to each bin. Empty bins are automatically included in the output, filled with + NaN for numeric aggregations or 0 for count/size. +

+

Supported frequencies

+ + + + + + + + + + + + + + +
StringIntervalDefault label
"S"Secondleft (bin start)
"T" / "min"Minuteleft
"H"Hourleft
"D"Calendar day (UTC)left — UTC midnight
"W" / "W-SUN"Week ending Sundayright — Sunday
"W-MON""W-SAT"Week ending on weekdayright — anchor day
"MS"Month start (1st)left
"ME"Month end (last day)right — last day
"QS"Quarter startleft
"QE"Quarter endright — last day of quarter
"AS" / "YS"Year start (Jan 1)left
"AE" / "YE"Year end (Dec 31)right
+
+ + +
+

Example 1 — Daily sum of a price Series

+
import { Series, resampleSeries } from "tsb";
+
+const dates = [
+  new Date("2024-01-01T09:00Z"),
+  new Date("2024-01-01T15:00Z"),
+  new Date("2024-01-02T10:00Z"),
+  new Date("2024-01-02T16:00Z"),
+  new Date("2024-01-04T09:00Z"), // note: Jan 3 is empty
+];
+const prices = new Series({ data: [100, 105, 98, 110, 120], index: dates, name: "price" });
+
+const daily = resampleSeries(prices, "D").sum();
+// Jan 1: 205   Jan 2: 208   Jan 3: NaN (empty)   Jan 4: 120
+console.log(daily.index.values.map(d => d.toISOString().slice(0,10)));
+console.log(daily.toArray());
+ +
Click "Run" to execute.
+
+ + +
+

Example 2 — Monthly mean with month-start labels

+
import { Series, resampleSeries } from "tsb";
+
+const timestamps = [
+  new Date("2024-01-05Z"), new Date("2024-01-20Z"),
+  new Date("2024-02-10Z"), new Date("2024-02-25Z"),
+  new Date("2024-03-15Z"),
+];
+const values = new Series({ data: [10, 20, 30, 40, 50], index: timestamps });
+
+const monthly = resampleSeries(values, "MS").mean();
+// Jan: 15   Feb: 35   Mar: 50
+console.log(monthly.index.values.map(d => d.toISOString().slice(0,7)));
+console.log(monthly.toArray());
+ +
Click "Run" to execute.
+
+ + +
+

Example 3 — OHLC (Open-High-Low-Close) aggregation

+
import { Series, resampleSeries } from "tsb";
+
+const ticks = [
+  new Date("2024-01-01T09:00Z"), new Date("2024-01-01T10:00Z"),
+  new Date("2024-01-01T11:00Z"), new Date("2024-01-01T15:00Z"),
+];
+const px = new Series({ data: [100, 108, 95, 103], index: ticks, name: "AAPL" });
+
+const ohlc = resampleSeries(px, "D").ohlc();
+console.log("open :", ohlc.col("open").toArray());
+console.log("high :", ohlc.col("high").toArray());
+console.log("low  :", ohlc.col("low").toArray());
+console.log("close:", ohlc.col("close").toArray());
+ +
Click "Run" to execute.
+
+ + +
+

Example 4 — DataFrame resample with per-column aggregations

+
import { DataFrame, Index, resampleDataFrame } from "tsb";
+
+const idx = new Index([
+  new Date("2024-01-01Z"), new Date("2024-01-01T12:00Z"),
+  new Date("2024-01-02Z"), new Date("2024-01-02T18:00Z"),
+]);
+const df = DataFrame.fromColumns(
+  { revenue: [100, 200, 150, 50], visits: [10, 20, 5, 15] },
+  { index: idx },
+);
+
+// Different aggregation per column
+const result = resampleDataFrame(df, "D").agg({
+  revenue: "sum",
+  visits: "mean",
+});
+console.log("revenue:", result.col("revenue").toArray()); // [300, 200]
+console.log("visits :", result.col("visits").toArray());  // [15, 10]
+console.log("index  :", result.index.values.map(d => d.toISOString().slice(0,10)));
+ +
Click "Run" to execute.
+
+ + +
+

Example 5 — Weekly resample (labeled by Sunday)

+
import { Series, resampleSeries } from "tsb";
+
+// Jan 8 2024 = Monday, Jan 14 = Sunday
+const dates = [
+  new Date("2024-01-08Z"), new Date("2024-01-10Z"), new Date("2024-01-14Z"),
+  new Date("2024-01-15Z"), new Date("2024-01-18Z"),
+];
+const s = new Series({ data: [1, 2, 3, 4, 5], index: dates });
+const weekly = resampleSeries(s, "W").sum();
+
+// Week 1 (ends Jan 14): 1+2+3=6   Week 2 (ends Jan 21): 4+5=9
+console.log(weekly.index.values.map(d => d.toISOString().slice(0,10)));
+console.log(weekly.toArray());
+ +
Click "Run" to execute.
+
+ + +
+

Example 6 — Custom aggregation function

+
import { Series, resampleSeries } from "tsb";
+
+const dates = [
+  new Date("2024-01-01Z"), new Date("2024-01-01T12:00Z"),
+  new Date("2024-01-02Z"),
+];
+const s = new Series({ data: [2, 4, 8], index: dates });
+
+// Product of each bin
+const product = resampleSeries(s, "D").agg((vals) =>
+  vals.reduce((acc, v) => (typeof v === "number" ? acc * v : acc), 1)
+);
+console.log(product.toArray()); // [8, 8]
+ +
Click "Run" to execute.
+
+ +
+

API Reference

+

resampleSeries(series, freq, options?)

+

Returns a SeriesResampler with methods: .sum(), .mean(), .min(), .max(), .count(), .first(), .last(), .std(), .var(), .size(), .ohlc(), .agg(spec).

+

resampleDataFrame(df, freq, options?)

+

Returns a DataFrameResampler with the same numeric aggregation methods (each returning a DataFrame), plus .size() (returns a Series), and .agg(spec) where spec can be a per-column object.

+

options

+ + + +
OptionTypeDescription
label"left" | "right"Override the default label side for the output index.
+
+ +
+

See also

+

+ groupby — label-based grouping · + rolling — rolling window · + date_range — generate datetime indices +

+
+ +
+ + + + diff --git a/playground/scalar_extract.html b/playground/scalar_extract.html new file mode 100644 index 00000000..16615590 --- /dev/null +++ b/playground/scalar_extract.html @@ -0,0 +1,156 @@ + + + + + + scalar_extract — tsb playground + + + + ← Back to playground index +

scalar_extract — squeeze / item / bool / first_valid_index / last_valid_index

+

+ Utilities to extract scalar values from Series and DataFrames. + Mirrors pandas.Series.squeeze(), Series.item(), + Series.bool(), Series.first_valid_index(), + Series.last_valid_index(), and their DataFrame equivalents. +

+ +

squeezeSeries — extract scalar from a single-element Series

+

Python pandas equivalent:

+
import pandas as pd
+
+s = pd.Series([42])
+print(s.squeeze())    # 42
+
+s2 = pd.Series([1, 2, 3])
+print(s2.squeeze())   # Series unchanged
+
+

tsb equivalent:

+
import { Series, squeezeSeries } from "tsb";
+
+squeezeSeries(new Series({ data: [42] }));     // 42
+squeezeSeries(new Series({ data: [1, 2, 3] })); // Series([1, 2, 3])
+
+ +

squeezeDataFrame — squeeze 1-D axis objects

+

Python pandas equivalent:

+
import pandas as pd
+
+df1x1 = pd.DataFrame({"A": [10]})
+print(df1x1.squeeze())          # 10 (scalar)
+
+df1xN = pd.DataFrame({"A": [1], "B": [2], "C": [3]})
+print(df1xN.squeeze())          # Series indexed by column names
+
+dfNx1 = pd.DataFrame({"A": [1, 2, 3]})
+print(dfNx1.squeeze())          # Series indexed by row labels
+print(dfNx1.squeeze(axis=1))    # same as above
+
+

tsb equivalent:

+
import { DataFrame, squeezeDataFrame } from "tsb";
+
+// 1×1 → scalar
+squeezeDataFrame(DataFrame.fromColumns({ A: [10] }));          // 10
+
+// 1 row, N cols → Series over columns
+squeezeDataFrame(DataFrame.fromColumns({ A: [1], B: [2] }));   // Series([1, 2])
+
+// N rows, 1 col → Series over rows
+squeezeDataFrame(DataFrame.fromColumns({ A: [1, 2, 3] }));     // Series([1, 2, 3])
+
+// axis=1: force squeeze along columns axis
+squeezeDataFrame(DataFrame.fromColumns({ A: [1, 2, 3] }), 1);  // Series([1, 2, 3])
+
+ +

itemSeries — return the single element of a Series

+

Python pandas equivalent:

+
import pandas as pd
+
+s = pd.Series([7])
+print(s.item())   # 7
+
+s2 = pd.Series([1, 2])
+s2.item()  # ValueError
+
+

tsb equivalent:

+
import { Series, itemSeries } from "tsb";
+
+itemSeries(new Series({ data: [7] }));       // 7
+itemSeries(new Series({ data: [1, 2] }));    // throws RangeError
+
+ +

boolSeries / boolDataFrame — convert to boolean

+

Python pandas equivalent:

+
import pandas as pd
+
+pd.Series([1]).bool()     # True
+pd.Series([0]).bool()     # False
+pd.DataFrame({"A": [1]}).bool()  # True
+
+

tsb equivalent:

+
import { Series, DataFrame, boolSeries, boolDataFrame } from "tsb";
+
+boolSeries(new Series({ data: [1] }));               // true
+boolSeries(new Series({ data: [0] }));               // false
+boolDataFrame(DataFrame.fromColumns({ A: [1] }));    // true
+boolDataFrame(DataFrame.fromColumns({ A: [false] })); // false
+
+ +

firstValidIndex / lastValidIndex — find first/last non-NA label

+

Python pandas equivalent:

+
import pandas as pd
+import numpy as np
+
+s = pd.Series([None, np.nan, 3.0, 4.0], index=["a", "b", "c", "d"])
+print(s.first_valid_index())  # "c"
+print(s.last_valid_index())   # "d"
+
+s_all_na = pd.Series([None, None])
+print(s_all_na.first_valid_index())  # None
+
+

tsb equivalent:

+
import { Series, firstValidIndex, lastValidIndex } from "tsb";
+
+const s = new Series({ data: [null, NaN, 3, 4], index: ["a", "b", "c", "d"] });
+firstValidIndex(s);   // "c"
+lastValidIndex(s);    // "d"
+
+const allNA = new Series({ data: [null, null] });
+firstValidIndex(allNA);  // null
+
+ +

dataFrameFirstValidIndex / dataFrameLastValidIndex

+

Python pandas equivalent:

+
import pandas as pd
+import numpy as np
+
+df = pd.DataFrame({"A": [None, None, 1], "B": [None, 2, 3]})
+print(df.first_valid_index())  # 1  (row 1 has B=2)
+print(df.last_valid_index())   # 2  (row 2 has A=1, B=3)
+
+

tsb equivalent:

+
import { DataFrame, dataFrameFirstValidIndex, dataFrameLastValidIndex } from "tsb";
+
+const df = DataFrame.fromColumns({
+  A: [null, null, 1],
+  B: [null, 2, 3],
+});
+dataFrameFirstValidIndex(df);  // 1
+dataFrameLastValidIndex(df);   // 2
+
+ + + + diff --git a/playground/sort_ops.html b/playground/sort_ops.html new file mode 100644 index 00000000..4e048de9 --- /dev/null +++ b/playground/sort_ops.html @@ -0,0 +1,126 @@ + + + + + + sort_ops — tsb playground + + + + ← Back to playground index +

sort_ops — sort_values and sort_index for Series and DataFrame

+

+ Sorting utilities that mirror pandas' sort_values and + sort_index methods. All functions are pure — they return a + new object without modifying the input. +

+ +

sortValuesSeries — sort a Series by its values

+

Python pandas equivalent:

+
import pandas as pd
+s = pd.Series([3, 1, 2], index=["b", "a", "c"])
+s.sort_values()
+# a    1
+# c    2
+# b    3
+s.sort_values(ascending=False)
+# b    3
+# c    2
+# a    1
+
+

tsb equivalent:

+
import { Series, sortValuesSeries } from "tsb";
+
+const s = new Series({ data: [3, 1, 2], index: ["b", "a", "c"] });
+
+sortValuesSeries(s);                      // [1, 2, 3] index: ["a","c","b"]
+sortValuesSeries(s, { ascending: false }); // [3, 2, 1] index: ["b","c","a"]
+sortValuesSeries(s, { ignoreIndex: true }); // resets index to [0, 1, 2]
+
+ +

NaN / null handling

+

Python pandas equivalent:

+
s = pd.Series([3.0, None, 1.0])
+s.sort_values()               # 1, 3, NaN  (NaN last by default)
+s.sort_values(na_position="first")  # NaN, 1, 3
+
+

tsb equivalent:

+
const s2 = new Series({ data: [3, null, 1] });
+sortValuesSeries(s2);                           // [1, 3, null]
+sortValuesSeries(s2, { naPosition: "first" }); // [null, 1, 3]
+
+ +

sortIndexSeries — sort a Series by its index labels

+

Python pandas equivalent:

+
s.sort_index()  # sort by label alphabetically / numerically
+
+

tsb equivalent:

+
import { sortIndexSeries } from "tsb";
+
+const s = new Series({ data: [3, 1, 2], index: ["b", "a", "c"] });
+sortIndexSeries(s);
+// values: [1, 3, 2], index: ["a", "b", "c"]
+
+ +

sortValuesDataFrame — sort DataFrame rows by column values

+

Python pandas equivalent:

+
df = pd.DataFrame({"a": [3, 1, 2], "b": [10, 30, 20]})
+df.sort_values("a")
+#    a   b
+# 1  1  30
+# 2  2  20
+# 0  3  10
+
+df.sort_values(["a", "b"], ascending=[True, False])
+
+

tsb equivalent:

+
import { DataFrame, sortValuesDataFrame } from "tsb";
+
+const df = DataFrame.fromColumns({ a: [3, 1, 2], b: [10, 30, 20] });
+
+sortValuesDataFrame(df, "a");
+// col a: [1, 2, 3]  col b: [30, 20, 10]
+
+sortValuesDataFrame(df, ["a", "b"], { ascending: [true, false] });
+// compound sort: by a ascending, then b descending
+
+ +

sortIndexDataFrame — sort DataFrame rows (or columns) by index

+

Python pandas equivalent:

+
df.sort_index()           # sort rows by row-index labels
+df.sort_index(axis=1)    # sort columns alphabetically
+
+

tsb equivalent:

+
import { sortIndexDataFrame } from "tsb";
+
+const df2 = DataFrame.fromColumns({ z: [1], a: [2], m: [3] });
+sortIndexDataFrame(df2, { axis: 1 });
+// columns in alphabetical order: "a", "m", "z"
+
+const df3 = DataFrame.fromColumns({ v: [1, 2, 3] }, { index: ["c", "a", "b"] });
+sortIndexDataFrame(df3);
+// rows in index order: "a" (2), "b" (3), "c" (1)
+
+ +

Summary of options

+
// sortValuesSeries(s, { ascending?, naPosition?, ignoreIndex? })
+// sortIndexSeries(s, { ascending?, naPosition?, ignoreIndex? })
+// sortValuesDataFrame(df, by, { ascending?, naPosition?, ignoreIndex? })
+// sortIndexDataFrame(df, { ascending?, axis?, naPosition?, ignoreIndex? })
+//
+// ascending    — true (default) or false
+// naPosition   — "last" (default) or "first"
+// ignoreIndex  — false (default) or true (resets index to 0, 1, 2, ...)
+// axis         — 0 (rows, default) or 1 (columns, sortIndexDataFrame only)
+
+ + diff --git a/playground/str_findall_and_json_denormalize.html b/playground/str_findall_and_json_denormalize.html new file mode 100644 index 00000000..c0e915e8 --- /dev/null +++ b/playground/str_findall_and_json_denormalize.html @@ -0,0 +1,258 @@ + + + + + + tsb — str.findall & to_json_denormalize + + + +

🔍 tsb — str.findall & toJsonDenormalize

+

+ Two new features in tsb: + strFindall / strFindallCount / strFindFirst / strFindallExpand + (mirrors pandas.Series.str.findall) + and + toJsonDenormalize / toJsonRecords / toJsonSplit / toJsonIndex + (the inverse of jsonNormalize). +

+

← Back to feature index

+ + +
+

1. strFindall — all regex matches per element

+

Mirrors pandas.Series.str.findall(pat). Returns a Series where each value is a JSON-encoded array of all non-overlapping matches.

+
// pandas equivalent:
+// s.str.findall(r'\d+')
+
+import { Series } from 'tsb';
+import { strFindall, strFindallCount, strFindFirst } from 'tsb';
+
+const prices = new Series({ data: ['$10.99 and $5.00', 'free!', '$3.50'] });
+
+const allPrices = strFindall(prices, /\$[\d.]+/);
+// Series [
+//   '["$10.99","$5.00"]',   ← JSON string
+//   '[]',
+//   '["$3.50"]'
+// ]
+
+// Parse the JSON to get actual arrays:
+JSON.parse(allPrices.values[0]); // ["$10.99", "$5.00"]
+JSON.parse(allPrices.values[1]); // []
+
✅ Each element contains a JSON.stringify(string[]) result.
+ +

With capture groups

+
// When the pattern has a capture group, returns the captured value
+const s = new Series({ data: ['name: Alice', 'name: Bob', 'unknown'] });
+const names = strFindall(s, /name: (\w+)/);
+// Series ['["Alice"]', '["Bob"]', '[]']
+
+// First capture group is extracted (pandas behaviour)
+ +

Null / NaN handling

+
const s = new Series({ data: ['hello', null, NaN, 'world'] });
+const result = strFindall(s, /\w+/);
+// Series ['["hello"]', null, null, '["world"]']
+// Null/NaN elements return null (not []) — matches pandas
+
+ + +
+

2. strFindallCount — count matches per element

+
import { strFindallCount } from 'tsb';
+
+const words = new Series({ data: ['one two three', 'four', 'five six'] });
+const counts = strFindallCount(words, /\b\w+\b/);
+// Series [3, 1, 2]
+
+// Count vowels per word
+const vowels = new Series({ data: ['beautiful', 'rhythm', 'aeiou'] });
+strFindallCount(vowels, /[aeiou]/i);
+// Series [5, 0, 5]
+
💡 More efficient than strFindall when you only need the count, not the matches themselves.
+
+ + +
+

3. strFindFirst — first match per element

+
import { strFindFirst } from 'tsb';
+
+const logs = new Series({ data: [
+  '2024-01-15: ERROR occurred',
+  '2024-02-20: INFO ok',
+  'no date here',
+] });
+
+const dates = strFindFirst(logs, /\d{4}-\d{2}-\d{2}/);
+// Series ['2024-01-15', '2024-02-20', null]
+
+// Extract just the year (first capture group)
+const years = strFindFirst(logs, /(\d{4})-\d{2}-\d{2}/);
+// Series ['2024', '2024', null]
+
+ + +
+

4. strFindallExpand — expand capture groups into a DataFrame

+

Mirrors pandas.Series.str.extract(pat, expand=True).

+
import { strFindallExpand } from 'tsb';
+
+const people = new Series({ data: ['John 30', 'Jane 25', 'unknown'] });
+
+// Named capture groups → column names
+const df = strFindallExpand(people, /(?<name>\w+)\s+(?<age>\d+)/);
+//    name  age
+// 0  John  30
+// 1  Jane  25
+// 2  null  null
+
+// Unnamed groups → numbered columns "0", "1", ...
+const df2 = strFindallExpand(people, /(\w+)\s+(\d+)/);
+//    0     1
+// 0  John  30
+// 1  Jane  25
+// 2  null  null
+
+ + +
+

5. toJsonDenormalize — flat DataFrame → nested JSON

+

The inverse of jsonNormalize: takes a DataFrame with dot-separated column names and reconstructs nested JSON objects.

+
import { DataFrame } from 'tsb';
+import { toJsonDenormalize } from 'tsb';
+
+// Start with a flattened DataFrame (as jsonNormalize would produce)
+const flat = DataFrame.fromColumns({
+  name:             ['Alice', 'Bob'],
+  'address.city':   ['New York', 'Los Angeles'],
+  'address.zip':    ['10001',    '90001'],
+  'address.country':['US',       'US'],
+});
+
+// Reconstruct nested JSON
+const records = toJsonDenormalize(flat);
+// [
+//   { name: 'Alice', address: { city: 'New York',    zip: '10001', country: 'US' } },
+//   { name: 'Bob',   address: { city: 'Los Angeles', zip: '90001', country: 'US' } },
+// ]
+
+// Round-trip: jsonNormalize → toJsonDenormalize
+import { jsonNormalize } from 'tsb';
+const original = [
+  { user: { name: 'Alice', age: 30 }, score: 100 },
+  { user: { name: 'Bob',   age: 25 }, score: 200 },
+];
+const df = jsonNormalize(original);
+const recovered = toJsonDenormalize(df);
+// recovered ≈ original (with the same structure)
+ +

Custom separator

+
// If jsonNormalize was called with sep='__'
+const df2 = DataFrame.fromColumns({
+  'user__name': ['Alice'],
+  'user__city': ['NYC'],
+});
+toJsonDenormalize(df2, { sep: '__' });
+// [{ user: { name: 'Alice', city: 'NYC' } }]
+ +

Drop null values

+
const df3 = DataFrame.fromColumns({ a: [1, null], b: [null, 2] });
+toJsonDenormalize(df3, { dropNull: true });
+// [{ a: 1 }, { b: 2 }]  ← null fields are omitted
+
+ + +
+

6. JSON serialization utilities

+ +

toJsonRecords — orient="records"

+
import { toJsonRecords } from 'tsb';
+const df = DataFrame.fromColumns({ a: [1, 2], b: ['x', 'y'] });
+toJsonRecords(df);
+// [{ a: 1, b: 'x' }, { a: 2, b: 'y' }]
+ +

toJsonSplit — orient="split"

+
import { toJsonSplit } from 'tsb';
+toJsonSplit(df);
+// { columns: ['a', 'b'], index: [0, 1], data: [[1, 'x'], [2, 'y']] }
+
+toJsonSplit(df, { includeIndex: false });
+// { columns: ['a', 'b'], data: [[1, 'x'], [2, 'y']] }
+ +

toJsonIndex — orient="index"

+
import { toJsonIndex } from 'tsb';
+toJsonIndex(df);
+// { '0': { a: 1, b: 'x' }, '1': { a: 2, b: 'y' } }
+
+// With custom string index
+const df2 = DataFrame.fromColumns(
+  { v: [10, 20] },
+  { index: ['alice', 'bob'] }
+);
+toJsonIndex(df2);
+// { alice: { v: 10 }, bob: { v: 20 } }
+
+ +
+

API reference

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FunctionSignaturepandas equivalent
strFindall(input, pat, flags?) → Series<Scalar>s.str.findall(pat)
strFindallCount(input, pat, flags?) → Series<Scalar>s.str.findall(pat).map(len)
strFindFirst(input, pat, flags?) → Series<Scalar>s.str.extract(pat)[0]
strFindallExpand(input, pat, flags?) → DataFrames.str.extract(pat, expand=True)
toJsonDenormalize(df, options?) → JsonRecord[]inverse of json_normalize
toJsonRecords(df) → JsonRecord[]df.to_json(orient='records')
toJsonSplit(df, options?) → JsonSplitResultdf.to_json(orient='split')
toJsonIndex(df) → JsonRecorddf.to_json(orient='index')
+
+ + diff --git a/playground/swaplevel.html b/playground/swaplevel.html new file mode 100644 index 00000000..52673cd1 --- /dev/null +++ b/playground/swaplevel.html @@ -0,0 +1,124 @@ + + + + + + swapLevel / reorderLevels — tsb playground + + + + ← Back to playground index +

swapLevel & reorderLevels

+

+ Reorder the levels of a MultiIndex on a Series or DataFrame. + Mirrors pandas.Series.swaplevel, + pandas.DataFrame.swaplevel, + pandas.Series.reorder_levels, and + pandas.DataFrame.reorder_levels. +

+ +

swapLevelSeries — swap two levels

+

Python pandas equivalent:

+
import pandas as pd
+
+idx = pd.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1)])
+s = pd.Series([10, 20, 30], index=idx)
+
+swapped = s.swaplevel(0, 1)
+print(swapped.index.tolist())
+# [(1, 'a'), (2, 'a'), (1, 'b')]
+
+

tsb equivalent:

+
import { MultiIndex, Series, swapLevelSeries } from "tsb";
+import type { Index, Label } from "tsb";
+
+const mi = MultiIndex.fromTuples([["a", 1], ["a", 2], ["b", 1]]);
+const s = new Series({ data: [10, 20, 30], index: mi as unknown as Index<Label> });
+
+const swapped = swapLevelSeries(s, 0, 1);
+// index tuples: [(1,"a"), (2,"a"), (1,"b")]
+// values: [10, 20, 30]
+
+ +

swapLevelDataFrame — swap row-index levels

+

Python pandas equivalent:

+
import pandas as pd
+
+idx = pd.MultiIndex.from_tuples([("a", 1), ("b", 2)], names=["letter", "number"])
+df = pd.DataFrame({"x": [10, 20]}, index=idx)
+
+swapped = df.swaplevel("letter", "number")
+print(swapped.index.tolist())
+# [(1, 'a'), (2, 'b')]
+
+

tsb equivalent:

+
import { DataFrame, MultiIndex, swapLevelDataFrame } from "tsb";
+import type { Index, Label } from "tsb";
+
+const mi = MultiIndex.fromTuples([["a", 1], ["b", 2]], { names: ["letter", "number"] });
+const df = DataFrame.fromColumns(
+  { x: [10, 20] },
+  { index: mi as unknown as Index<Label> },
+);
+
+const swapped = swapLevelDataFrame(df, "letter", "number");
+// row index tuples: [(1,"a"), (2,"b")]
+
+ +

reorderLevelsSeries — arbitrary level reordering

+

Python pandas equivalent:

+
import pandas as pd
+
+idx = pd.MultiIndex.from_arrays([["a", "b"], [1, 2], ["x", "y"]])
+s = pd.Series([10, 20], index=idx)
+
+reordered = s.reorder_levels([2, 0, 1])
+print(reordered.index.tolist())
+# [("x", "a", 1), ("y", "b", 2)]
+
+

tsb equivalent:

+
import { MultiIndex, Series, reorderLevelsSeries } from "tsb";
+import type { Index, Label } from "tsb";
+
+const mi = MultiIndex.fromArrays([["a", "b"], [1, 2], ["x", "y"]]);
+const s = new Series({ data: [10, 20], index: mi as unknown as Index<Label> });
+
+const reordered = reorderLevelsSeries(s, [2, 0, 1]);
+// index tuples: [("x","a",1), ("y","b",2)]
+
+ +

reorderLevelsDataFrame

+

Python pandas equivalent:

+
import pandas as pd
+
+idx = pd.MultiIndex.from_tuples([("a", 1), ("b", 2)])
+df = pd.DataFrame({"v": [5, 15]}, index=idx)
+
+reordered = df.reorder_levels([1, 0])
+print(reordered.index.tolist())
+# [(1, 'a'), (2, 'b')]
+
+

tsb equivalent:

+
import { DataFrame, MultiIndex, reorderLevelsDataFrame } from "tsb";
+import type { Index, Label } from "tsb";
+
+const mi = MultiIndex.fromTuples([["a", 1], ["b", 2]]);
+const df = DataFrame.fromColumns(
+  { v: [5, 15] },
+  { index: mi as unknown as Index<Label> },
+);
+
+const reordered = reorderLevelsDataFrame(df, [1, 0]);
+// row index tuples: [(1,"a"), (2,"b")]
+
+ + diff --git a/playground/testing.html b/playground/testing.html new file mode 100644 index 00000000..d202c5ba --- /dev/null +++ b/playground/testing.html @@ -0,0 +1,176 @@ + + + + + + tsb — testing utilities + + + +
+

tsb — testing utilities

+

assertSeriesEqual · assertFrameEqual · assertIndexEqual · mirrors pandas.testing

+
+ +
+ +
+

Overview

+

+ The tsb testing module provides assertion helpers for comparing tsb objects + in test suites — analogous to pandas.testing.assert_series_equal, + assert_frame_equal, and assert_index_equal. +

+

+ When a check fails, a descriptive AssertionError is thrown with information about + which element differed and at which position — making test failures easy to diagnose. +

+
+ +
+

Import

+
import {
+  assertSeriesEqual,
+  assertFrameEqual,
+  assertIndexEqual,
+  AssertionError,
+} from "tsb";
+
+ +
+

assertSeriesEqual(left, right, options?)

+

Assert that two Series contain identical values (with optional tolerance for floats).

+

Passing example

+
import { Series, assertSeriesEqual } from "tsb";
+
+const a = new Series({ data: [1, 2, 3], name: "x" });
+const b = new Series({ data: [1, 2, 3], name: "x" });
+assertSeriesEqual(a, b);
+// ✅ no exception thrown
+ +

Failing example

+
const c = new Series({ data: [1, 2, 99], name: "x" });
+assertSeriesEqual(a, c);
+// ❌ AssertionError: Series: values differ at index 2 (position 2).
+//    left=3, right=99
+ +

Float tolerance

+
const p = new Series({ data: [1.0, 2.0] });
+const q = new Series({ data: [1.0 + 1e-9, 2.0] });  // tiny rounding error
+assertSeriesEqual(p, q);                              // ✅ passes (within default atol=1e-8)
+
+assertSeriesEqual(p, q, { checkExact: true });        // ❌ exact comparison fails
+ +

Options

+ + + + + + + + + +
OptionTypeDefaultDescription
checkDtypesbooleantrueCompare dtype of both Series
checkIndexbooleantrueCompare row index labels
checkNamesbooleantrueCompare Series name and index name
checkExactbooleanfalseExact numeric equality (no tolerance)
rtolnumber1e-5Relative tolerance
atolnumber1e-8Absolute tolerance
objLabelstring"Series"Error message prefix
+
+ +
+

assertFrameEqual(left, right, options?)

+

Assert that two DataFrames are structurally and value-identical.

+ +

Passing example

+
import { DataFrame, assertFrameEqual } from "tsb";
+
+const a = DataFrame.fromColumns({ x: [1, 2], y: [3, 4] });
+const b = DataFrame.fromColumns({ x: [1, 2], y: [3, 4] });
+assertFrameEqual(a, b); // ✅
+ +

Ignore column order

+
const c = DataFrame.fromColumns({ y: [3, 4], x: [1, 2] }); // columns reversed
+assertFrameEqual(a, c, { checkLike: true }); // ✅ order ignored
+ +

Options

+ + + + + + + + + + +
OptionTypeDefaultDescription
checkDtypesbooleantrueCompare column dtypes
checkIndexbooleantrueCompare row index labels
checkNamesbooleantrueCompare index and column names
checkLikebooleanfalseIgnore column order
checkExactbooleanfalseExact numeric equality
rtolnumber1e-5Relative tolerance
atolnumber1e-8Absolute tolerance
objLabelstring"DataFrame"Error message prefix
+
+ +
+

assertIndexEqual(left, right, options?)

+

Assert that two Index objects have identical labels.

+
import { Index, assertIndexEqual } from "tsb";
+
+const a = new Index(["a", "b", "c"]);
+const b = new Index(["a", "b", "c"]);
+assertIndexEqual(a, b); // ✅
+
+const c = new Index(["a", "b", "z"]);
+assertIndexEqual(a, c);
+// ❌ AssertionError: Index: Index values differ at position 2. left=c, right=z
+
+ +
+

AssertionError

+

+ All failed assertions throw an AssertionError instance (extends Error). + It can be caught explicitly or used with expect().toThrow(AssertionError) in bun:test. +

+
import { AssertionError, assertSeriesEqual, Series } from "tsb";
+
+try {
+  assertSeriesEqual(
+    new Series({ data: [1, 2, 3] }),
+    new Series({ data: [1, 2, 4] }),
+  );
+} catch (e) {
+  if (e instanceof AssertionError) {
+    console.error("Assertion failed:", e.message);
+  }
+}
+

+ 💡 In bun:test, use expect(() => assertSeriesEqual(a, b)).toThrow(AssertionError) + to write negative assertions. +

+
+ +
+

pandas equivalents

+ + + + + + +
tsbpandas
assertSeriesEqual(a, b)pd.testing.assert_series_equal(a, b)
assertFrameEqual(a, b)pd.testing.assert_frame_equal(a, b)
assertIndexEqual(a, b)pd.testing.assert_index_equal(a, b)
AssertionErrorAssertionError (Python built-in)
+
+ +
+ + diff --git a/playground/timedelta_range.html b/playground/timedelta_range.html new file mode 100644 index 00000000..f06e0bdf --- /dev/null +++ b/playground/timedelta_range.html @@ -0,0 +1,209 @@ + + + + + + tsb — timedelta_range + + + +
+

tsb — timedelta_range

+

Generate fixed-frequency TimedeltaIndex sequences · mirrors pandas.timedelta_range

+
+
+ ← back to index + +

Frequency Reference

+
+ + + + + + + + + + +
StringDurationExample
W1 week (7 days)"2W" → 14 days per step
D1 calendar day"3D" → 3 days per step
H1 hour"6H" → 6 hours per step
T / min1 minute"30min" → 30 minutes
S1 second"10S" → 10 seconds
L / ms1 millisecond"500ms" → 500 ms
+
+ +

Interactive Builder

+
+

Provide at least 2 of: start, end, periods, freq.

+ +
+
+ + +
+
+ + +
+
+ + +
+
+ + +
+
+ + +
+
+ + +
+
+ + +
Click Generate to produce the TimedeltaIndex.
+
+ +

Preset Examples

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
DescriptionCodeAction
5 daily intervals from 0timedelta_range({ start:"0 days", periods:5, freq:"D" })
1-to-3 days in daily stepstimedelta_range({ start:"1 days", end:"3 days", freq:"D" })
4 entries ending at 3 days (freq D)timedelta_range({ end:"3 days", periods:4, freq:"D" })
Linear space 0→2 days, 5 pointstimedelta_range({ start:"0 days", end:"2 days", periods:5 })
6-hour steps, closed=lefttimedelta_range({ start:"0 days", end:"1 days", freq:"6H", closed:"left" })
30-minute intervals, 8 periodstimedelta_range({ start:"0 days", periods:8, freq:"30min" })
+
+
+ + + + diff --git a/playground/transform_agg.html b/playground/transform_agg.html new file mode 100644 index 00000000..39f2e2c6 --- /dev/null +++ b/playground/transform_agg.html @@ -0,0 +1,147 @@ + + + + + + transform — Series.transform / DataFrame.transform — tsb playground + + + +

transform — Series.transform / DataFrame.transform

+

+ Apply one or more functions to a Series or DataFrame and return a result with the + same index (broadcast scalars to full length). + Mirrors pandas.Series.transform() and pandas.DataFrame.transform(). +

+ +

API

+
+import { seriesTransform, dataFrameTransform } from "tsb";
+
+// single function or built-in name → Series
+seriesTransform(s, "cumsum");
+seriesTransform(s, (x) => x);
+
+// array → DataFrame (one column per function)
+seriesTransform(s, ["sum", "cumsum", "mean"]);
+
+// Record → DataFrame with named columns
+seriesTransform(s, { total: "sum", running: "cumsum" });
+
+// DataFrame transform (column-wise by default)
+dataFrameTransform(df, "cumsum");
+dataFrameTransform(df, { a: "sum", b: "cummin" });  // per-column
+dataFrameTransform(df, "cumsum", { axis: 1 });       // row-wise
+  
+ +

Built-in names

+
+Aggregating (broadcast): "sum", "mean", "min", "max", "std", "var", "median",
+                          "count", "first", "last", "prod", "any", "all", "nunique"
+Cumulative (same shape): "cumsum", "cumprod", "cummin", "cummax"
+  
+ +

Interactive Demo

+ + + + + + +
Click a button above to run an example.
+ +

Examples

+
+import { Series, DataFrame, seriesTransform, dataFrameTransform } from "tsb";
+
+const s = new Series({ data: [1, 2, 3, 4] });
+
+// cumulative sum
+seriesTransform(s, "cumsum").values;       // [1, 3, 6, 10]
+
+// broadcast aggregate
+seriesTransform(s, "sum").values;          // [10, 10, 10, 10]
+
+// multiple functions
+const df = seriesTransform(s, ["sum", "cumsum", "mean"]);
+df.col("sum").values;    // [10, 10, 10, 10]
+df.col("cumsum").values; // [1, 3, 6, 10]
+df.col("mean").values;   // [2.5, 2.5, 2.5, 2.5]
+
+// DataFrame transform
+const frame = DataFrame.fromColumns({ a: [1,2,3], b: [10,20,30] });
+dataFrameTransform(frame, "cumsum").col("b").values;  // [10, 30, 60]
+  
+ + + + diff --git a/playground/truncate.html b/playground/truncate.html new file mode 100644 index 00000000..ee4cb76b --- /dev/null +++ b/playground/truncate.html @@ -0,0 +1,132 @@ + + + + + + truncate — tsb playground + + + + ← Back to playground index +

truncate

+

+ Truncate a Series or DataFrame to keep only the elements within a label window + [before, after] (both bounds inclusive). + Mirrors pandas.Series.truncate and + pandas.DataFrame.truncate. +

+ +

truncateSeries — keep rows within [before, after]

+

Python pandas equivalent:

+
import pandas as pd
+
+s = pd.Series([10, 20, 30, 40, 50], index=[0, 1, 2, 3, 4])
+
+print(s.truncate(before=1, after=3))
+# 1    20
+# 2    30
+# 3    40
+
+print(s.truncate(before=2))
+# 2    30
+# 3    40
+# 4    50
+
+print(s.truncate(after=2))
+# 0    10
+# 1    20
+# 2    30
+
+

tsb equivalent:

+
import { Series, truncateSeries } from "tsb";
+
+const s = new Series({ data: [10, 20, 30, 40, 50], index: [0, 1, 2, 3, 4] });
+
+truncateSeries(s, 1, 3).values;    // [20, 30, 40]
+truncateSeries(s, 2).values;       // [30, 40, 50]
+truncateSeries(s, undefined, 2).values; // [10, 20, 30]
+
+ +

truncateDataFrame — truncate rows

+

Python pandas equivalent:

+
import pandas as pd
+
+df = pd.DataFrame(
+    {"a": [10, 20, 30, 40, 50], "b": [1, 2, 3, 4, 5]},
+    index=[0, 1, 2, 3, 4],
+)
+
+print(df.truncate(before=1, after=3))
+#     a  b
+# 1  20  2
+# 2  30  3
+# 3  40  4
+
+

tsb equivalent:

+
import { DataFrame, truncateDataFrame } from "tsb";
+
+const df = DataFrame.fromColumns(
+  { a: [10, 20, 30, 40, 50], b: [1, 2, 3, 4, 5] },
+  { index: [0, 1, 2, 3, 4] },
+);
+
+const result = truncateDataFrame(df, 1, 3);
+result.col("a").values; // [20, 30, 40]
+result.index.values;    // [1, 2, 3]
+
+ +

truncateDataFrame — truncate columns (axis=1)

+

Python pandas equivalent:

+
import pandas as pd
+
+df = pd.DataFrame({"a": [1, 2], "b": [3, 4], "c": [5, 6]})
+
+print(df.truncate(before="a", after="b", axis=1))
+#    a  b
+# 0  1  3
+# 1  2  4
+
+

tsb equivalent:

+
import { DataFrame, truncateDataFrame } from "tsb";
+
+const df = DataFrame.fromColumns({ a: [1, 2], b: [3, 4], c: [5, 6] });
+
+const result = truncateDataFrame(df, "a", "b", { axis: 1 });
+result.columns.values; // ["a", "b"]
+
+ +

String index truncation

+

Python pandas equivalent:

+
import pandas as pd
+
+s = pd.Series(
+    [1, 2, 3, 4, 5],
+    index=["apple", "banana", "cherry", "date", "elderberry"],
+)
+
+print(s.truncate(before="banana", after="date"))
+# banana    2
+# cherry    3
+# date      4
+
+

tsb equivalent:

+
import { Series, truncateSeries } from "tsb";
+
+const s = new Series({
+  data: [1, 2, 3, 4, 5],
+  index: ["apple", "banana", "cherry", "date", "elderberry"],
+});
+
+truncateSeries(s, "banana", "date").values; // [2, 3, 4]
+
+ + diff --git a/playground/update.html b/playground/update.html new file mode 100644 index 00000000..fd452d63 --- /dev/null +++ b/playground/update.html @@ -0,0 +1,101 @@ + + + + + + update — tsb playground + + + + ← Back to playground index +

update

+

+ Update a Series or DataFrame in-place using non-NA values from another object. + Mirrors pandas.DataFrame.update and pandas.Series.update. +

+ +

seriesUpdate — basic overwrite

+

Python pandas equivalent:

+
import pandas as pd
+import numpy as np
+
+s = pd.Series([1, np.nan, 3], index=[0, 1, 2])
+other = pd.Series([np.nan, 20, np.nan], index=[0, 1, 2])
+s.update(other)
+print(s.tolist())
+# [1.0, 20.0, 3.0]
+
+

tsb equivalent:

+
import { Series, seriesUpdate } from "tsb";
+
+const s = new Series({ data: [1, null, 3], index: [0, 1, 2] });
+const other = new Series({ data: [null, 20, null], index: [0, 1, 2] });
+seriesUpdate(s, other).values;
+// [1, 20, 3]
+
+ +

overwrite=false — only fill NA

+

Python pandas equivalent:

+
import pandas as pd
+import numpy as np
+
+s = pd.Series([1, np.nan, 3])
+other = pd.Series([10, 20, 30])
+s.update(other, overwrite=False)
+print(s.tolist())
+# [1.0, 20.0, 3.0]
+
+

tsb equivalent:

+
import { Series, seriesUpdate } from "tsb";
+
+const s = new Series({ data: [1, null, 3] });
+const other = new Series({ data: [10, 20, 30] });
+seriesUpdate(s, other, { overwrite: false }).values;
+// [1, 20, 3]
+
+ +

dataFrameUpdate — update from another DataFrame

+

Python pandas equivalent:

+
import pandas as pd
+import numpy as np
+
+df = pd.DataFrame({"a": [1, np.nan, 3], "b": [10, 20, 30]})
+other = pd.DataFrame({"a": [np.nan, 99, np.nan]})
+df.update(other)
+print(df)
+#       a   b
+# 0   1.0  10
+# 1  99.0  20
+# 2   3.0  30
+
+

tsb equivalent:

+
import { DataFrame, dataFrameUpdate } from "tsb";
+
+const df = DataFrame.fromColumns({ a: [1, null, 3], b: [10, 20, 30] });
+const other = DataFrame.fromColumns({ a: [null, 99, null] });
+const result = dataFrameUpdate(df, other);
+result.col("a").values; // [1, 99, 3]
+result.col("b").values; // [10, 20, 30]
+
+ +

Label alignment

+

tsb equivalent:

+
import { Series, seriesUpdate } from "tsb";
+
+const s = new Series({ data: [1, 2, 3], index: [0, 1, 2] });
+// other only has label 1 — other labels unchanged
+const other = new Series({ data: [99], index: [1] });
+seriesUpdate(s, other).values;
+// [1, 99, 3]
+
+ + diff --git a/playground/xs.html b/playground/xs.html new file mode 100644 index 00000000..76af21ad --- /dev/null +++ b/playground/xs.html @@ -0,0 +1,109 @@ + + + + + + xs — Cross-Section Selection — tsb playground + + + +

xs — Cross-Section Selection

+

+ xsDataFrame(df, key) extracts a row by label as a Series, or + a column by name (with axis: 1). Works with both flat and + MultiIndex DataFrames. +

+ +

Interactive Demo

+ + + +
Click a button above to run an example.
+ +

Code Examples

+
import { DataFrame, Series, xsDataFrame, xsSeries, MultiIndex } from "tsb";
+
+// ── flat index ──────────────────────────────────────────────────────────────
+const df = DataFrame.fromColumns(
+  { a: [1, 2, 3], b: [4, 5, 6] },
+  { index: ["x", "y", "z"] },
+);
+
+// Select row "y" → Series { a: 2, b: 5 }
+xsDataFrame(df, "y");
+
+// Select column "b" → Series { x: 4, y: 5, z: 6 }
+xsDataFrame(df, "b", { axis: 1 });
+
+// ── MultiIndex ─────────────────────────────────────────────────────────────
+const mi = MultiIndex.fromTuples([
+  ["A", 1], ["A", 2],
+  ["B", 1], ["B", 2],
+]);
+const miDf = DataFrame.fromColumns({ v: [1, 2, 3, 4] }, { index: mi });
+
+// All "A" rows → DataFrame with 2 rows
+xsDataFrame(miDf, "A");
+
+// ── Series ─────────────────────────────────────────────────────────────────
+const s = new Series({ data: [10, 20, 30], index: ["a", "b", "c"] });
+xsSeries(s, "b"); // → 20
+
+ + + + diff --git a/src/core/date_offset.ts b/src/core/date_offset.ts index 4eda968e..f4c80e9f 100644 --- a/src/core/date_offset.ts +++ b/src/core/date_offset.ts @@ -304,7 +304,10 @@ function applyWeek(date: Date, n: number, jsDow: number | null): Date { export class Day implements DateOffset { readonly name = "Day"; - constructor(readonly n = 1) {} + readonly n: number; + constructor(n = 1) { + this.n = n; + } /** Convenience factory: `Day.of(3)` equivalent to `new Day(3)`. */ static of(n = 1): Day { @@ -347,7 +350,10 @@ export class Day implements DateOffset { export class Hour implements DateOffset { readonly name = "Hour"; - constructor(readonly n = 1) {} + readonly n: number; + constructor(n = 1) { + this.n = n; + } static of(n = 1): Hour { return new Hour(n); @@ -386,7 +392,10 @@ export class Hour implements DateOffset { export class Minute implements DateOffset { readonly name = "Minute"; - constructor(readonly n = 1) {} + readonly n: number; + constructor(n = 1) { + this.n = n; + } static of(n = 1): Minute { return new Minute(n); @@ -425,7 +434,10 @@ export class Minute implements DateOffset { export class Second implements DateOffset { readonly name = "Second"; - constructor(readonly n = 1) {} + readonly n: number; + constructor(n = 1) { + this.n = n; + } static of(n = 1): Second { return new Second(n); @@ -464,7 +476,10 @@ export class Second implements DateOffset { export class Milli implements DateOffset { readonly name = "Milli"; - constructor(readonly n = 1) {} + readonly n: number; + constructor(n = 1) { + this.n = n; + } static of(n = 1): Milli { return new Milli(n); @@ -522,12 +537,11 @@ export class Week implements DateOffset { * Weekday anchor (pandas convention: 0 = Monday, …, 6 = Sunday). * `null` means no alignment. */ + readonly n: number; readonly weekday: number | null; - constructor( - readonly n = 1, - options: WeekOptions = {}, - ) { + constructor(n = 1, options: WeekOptions = {}) { + this.n = n; this.weekday = options.weekday ?? 
null; } @@ -591,7 +605,10 @@ export class Week implements DateOffset { export class MonthEnd implements DateOffset { readonly name = "MonthEnd"; - constructor(readonly n = 1) {} + readonly n: number; + constructor(n = 1) { + this.n = n; + } static of(n = 1): MonthEnd { return new MonthEnd(n); @@ -652,7 +669,10 @@ export class MonthEnd implements DateOffset { export class MonthBegin implements DateOffset { readonly name = "MonthBegin"; - constructor(readonly n = 1) {} + readonly n: number; + constructor(n = 1) { + this.n = n; + } static of(n = 1): MonthBegin { return new MonthBegin(n); @@ -709,7 +729,10 @@ export class MonthBegin implements DateOffset { export class YearEnd implements DateOffset { readonly name = "YearEnd"; - constructor(readonly n = 1) {} + readonly n: number; + constructor(n = 1) { + this.n = n; + } static of(n = 1): YearEnd { return new YearEnd(n); @@ -761,7 +784,10 @@ export class YearEnd implements DateOffset { export class YearBegin implements DateOffset { readonly name = "YearBegin"; - constructor(readonly n = 1) {} + readonly n: number; + constructor(n = 1) { + this.n = n; + } static of(n = 1): YearBegin { return new YearBegin(n); @@ -816,7 +842,10 @@ export class YearBegin implements DateOffset { export class BusinessDay implements DateOffset { readonly name = "BusinessDay"; - constructor(readonly n = 1) {} + readonly n: number; + constructor(n = 1) { + this.n = n; + } static of(n = 1): BusinessDay { return new BusinessDay(n); diff --git a/src/core/frame.ts b/src/core/frame.ts index 91b28377..ddb641a1 100644 --- a/src/core/frame.ts +++ b/src/core/frame.ts @@ -131,6 +131,21 @@ export class DataFrame { return new DataFrame(colMap, rowIndex); } + /** + * Alias for {@link fromColumns}. Create a DataFrame from an object mapping column names to value arrays. 
+ * + * @example + * ```ts + * const df = DataFrame.fromArrays({ a: [1, 2, 3], b: [4, 5, 6] }); + * ``` + */ + static fromArrays( + data: Readonly>, + options?: DataFrameOptions, + ): DataFrame { + return DataFrame.fromColumns(data, options); + } + /** * Create a DataFrame from an array of row objects. * @@ -758,11 +773,23 @@ export class DataFrame { // ─── module-level helpers (extracted to keep methods lean) ─────────────────── +function isIndexLike(v: unknown): v is Index