Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 4 additions & 2 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -3,12 +3,14 @@
*.jl.mem
Manifest*.toml
/docs/build/
/data/
data/
.vscode/
*.txt
*.svg
.DS_Store
__pycache__/
bin/conda
env/
*.ipynb
*.ipynb
ldpc/benchcodes/
visualize/images
28 changes: 22 additions & 6 deletions Makefile
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
JL = julia --project
maxerror ?= $(nsample)

init:
$(JL) -e 'using Pkg; Pkg.instantiate()'
Expand All @@ -12,13 +13,28 @@ init-conda:
init-ldpc:
./ldpc/setup

generate-surface-samples:
$(JL) -e 'using DecoderBenchmarks;using TensorQEC; mkpath(joinpath("data","depolarizing")); mkpath(joinpath("data","surface_code")); generate_depolarizing_samples(collect(3:2:21).^2, collect(0.01:0.01:0.2), 10000, joinpath("data","depolarizing")); for d in 3:2:21 generate_code_data(SurfaceCode(d,d),joinpath("data","surface_code"),"surface_code_$$d") end'
generate-error-samples:
$(JL) -e 'using DecoderBenchmarks;using TensorQEC; mkpath(joinpath(@__DIR__,"data","depolarizing")); generate_depolarizing_samples($(nvec), $(pvec), $(nsample), joinpath(@__DIR__,"data","depolarizing"))'

run-benchmark-surface-BP:
$(JL) -e 'using DecoderBenchmarks;using TensorQEC; for d in 3:2:11 run_benchmark(SurfaceCode(d,d), collect(0.01:0.01:0.2), 10000, BPDecoder(), joinpath("data","surface_code","result","TensorQEC_BP"), joinpath("data","depolarizing");log_file="log.txt") end'
generate-code-data:
$(JL) -e 'using DecoderBenchmarks;using TensorQEC; generate_code_data($(codevec),joinpath(@__DIR__,"data","codes"))'

run-benchmark-ldpc-surface-BP:
benchmark-TensorQEC:
$(JL) -e 'using DecoderBenchmarks;using TensorQEC; run_benchmark($(codevec), $(pvec), $(nsample), $(maxerror), $(decoder), joinpath(@__DIR__,"data","result","TensorQEC");log_file="log.txt", filename_prefix=joinpath(@__DIR__,"data","result","files.txt"), relative_path="data/result/TensorQEC")'

benchmark-ldpc:
mkdir -p ldpc/data
$(JL) -e 'using DecoderBenchmarks;using TensorQEC; generate_code_data($(codevec),joinpath(@__DIR__,"ldpc","data"))'
./ldpc/run
rm -rf ldpc/data

generate-plotting-data:
$(JL) -e 'include(joinpath(@__DIR__,"visualize","generate_plotting_data.jl"));select_files_with_pattern($(patterns))'

benchmark-TensorQEC-Gurobi:
$(JL) -e 'using DecoderBenchmarks,Gurobi;using TensorQEC; run_benchmark($(codevec), $(pvec), $(nsample), $(maxerror), IPDecoder(Gurobi.Optimizer,false), joinpath(@__DIR__,"data","result","TensorQEC");log_file="log.txt", filename_prefix=joinpath(@__DIR__,"data","result","files.txt"), relative_path="data/result/TensorQEC")'

benchmark-ldpc-benchcode:
./ldpc/run

.PHONY: init generate-depolarizing-samples update make-data-path generate-surface-samples run-benchmark-surface-BP
.PHONY: init generate-error-samples update make-data-path benchmark-TensorQEC benchmark-ldpc generate-plotting-data
15 changes: 11 additions & 4 deletions Project.toml
Original file line number Diff line number Diff line change
@@ -1,20 +1,27 @@
name = "DecoderBenchmarks"
uuid = "3bf0baa3-530d-4515-bb33-a07a4dee922a"
authors = ["nzy1997"]
version = "1.0.0-DEV"
authors = ["nzy1997"]

[deps]
Dates = "ade2ca70-3891-5945-98fb-dc099432e06a"
DelimitedFiles = "8bb1440f-4735-579b-a4ab-409b98df4dab"
Distributed = "8ba89e20-285c-5b6f-9357-94700520ee1b"
Gurobi = "2e9cd046-0924-5485-92f1-d5272153d98b"
JSON = "682c06a0-de6a-54ab-a142-c8b1cf79cde6"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
TensorQEC = "0500ac79-7fb5-4262-aaea-37bb1845d1ef"

[sources]
TensorQEC = {path = "/Users/nizhongyi/jcode/TensorQEC.jl"}

[compat]
Dates = "1.11"
DelimitedFiles = "1.9"
Dates = "1"
DelimitedFiles = "1"
Distributed = "1"
Gurobi = "1"
JSON = "0.21"
Random = "1.11"
Random = "1"
julia = "1"

[extras]
Expand Down
53 changes: 46 additions & 7 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -11,17 +11,42 @@ Clone the repository and run the following command in the root directory to inst
make init
```

Generate the samples:
To generate the code data, run the following command:
```bash
make generate-surface-samples
codevec=[SurfaceCode(3,3),SurfaceCode(5,5),SurfaceCode(7,7)] make generate-code-data
```
Here, `codevec` is a vector of codes; the code names are the same as those in the `TensorQEC` package. You can check [here](https://nzy1997.github.io/TensorQEC.jl/dev/generated/codes/) for the available codes in the `TensorQEC` package.

Run the benchmark:
The code data is generated in the `data/codes` directory as a JSON file named `code_name.json`. This file contains the following information:
- `code_name`: The name of the code.
- `qubit_num`: The number of physical qubits.
- `stabilizer_num`: The number of stabilizers.
- `pcm`: The parity check matrix.
- `logical_x`: The logical X operator.
- `logical_z`: The logical Z operator.

To benchmark the performance of the codes or decoders, you can run the benchmark directly. Error patterns are generated on the fly.
```bash
codevec=[SurfaceCode(3,3),SurfaceCode(5,5),SurfaceCode(7,7)] pvec=[0.01,0.02] nsample=100 maxerror=100 decoder="BPDecoder()" make benchmark-TensorQEC
```
The `decoder` is the decoder to use. The benchmark results are saved in the `data/result/TensorQEC` directory as a json file, named as `code=code_name_pmin=pmin_pmax=pmax_nsample=nsample_maxerror=maxerror_workers=workers_decoder=decoder.json`. The information includes
- `code_name`: The name of the code.
- `pvec`: The error probabilities.
- `nsample`: The number of samples.
- `max_error`: The maximum number of logical errors.
- `nsim`: The actual number of samples for each `p`.
- `error_count`: The logical error count for each `p`.
- `decoder`: The decoder.
- `time_res`: The average decoding time.
- `error_rate`: The logical error rate.

If you still want to generate fixed error samples for other workflows, you can use:
```bash
make run-benchmark-surface-BP
nvec=[3,5,7,9,11].^2 pvec=0.01:0.01:0.05 nsample=100 make generate-error-samples
```
Here, `nvec` is a vector of qubit numbers, `pvec` is a vector of error rates, and `nsample` is the number of samples to generate. The samples are written to the `data/depolarizing` directory as `.dat` files named `n=n_p=p_nsample=nsample.dat`.

To run the benchmarks for python packages, first install a conda environment with the following command:
To run the benchmarks for python packages like [ldpc](https://github.com/quantumgizmos/ldpc), we first install a conda environment with the following command:
```bash
make init-conda
```
Expand All @@ -33,10 +58,24 @@ make init-ldpc

Then run the following command to run the benchmark:
```bash
make run-benchmark-ldpc-surface-BP
codevec=[SurfaceCode(3,3),SurfaceCode(5,5),SurfaceCode(7,7)] pvec=[0.01,0.02] nsample=100 make benchmark-ldpc
```
The results are saved in the `data/result/ldpc` directory as a json file with similar format as the `TensorQEC` results.


## Visualization

We can use the `visualize/viz.typ` file to visualize the results. `visualize-all` is a function that takes a list of tuples, each tuple contains some strings to filter the file names.

For example, to visualize the results from the `TensorQEC` package (10000 samples, `BPDecoder`) alongside the results from the BP decoder in the `ldpc` package, run the following command:
```bash
visualize-all((("TensorQEC","10000","BP"),("ldpc","10000","BpOsdDecoder")))
```

This will find all the files in the `data/result` directory that include the strings "TensorQEC", "10000", "BP" or "ldpc", "10000", "BpOsdDecoder" and plot the results.

## Samples

Samples of the depolarizing channel are available at [OneDrive](https://hkustgz-my.sharepoint.com/:f:/g/personal/jinguoliu_hkust-gz_edu_cn/Eo4RiKqgPrFEj_ghttddtzwBrJb7Qajj2Q2CcZeTydAxyA?e=vrd9k1). The codes include:
- Surface code with $d = 9$ and $d = 21$ (TODO: more).
- BBCode [[144, 12, 12]].
- BBCode [[144, 12, 12]].
8 changes: 8 additions & 0 deletions benchmark/example_run.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
using DecoderBenchmarks
using TensorQEC
using DelimitedFiles

for d in [3,5,7]
c = SurfaceCode(d,d)
run_benchmark(c,0.01:0.01:0.2, 100, 100, IPDecoder(), joinpath(@__DIR__,"../data","result","TensorQEC");log_file="log.txt")
end
27 changes: 27 additions & 0 deletions benchmark/pythonrun.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
codevec=[BivariateBicycleCode(6,12, ((3,0),(0,1),(0,2)), ((0,3),(1,0),(2,0)))] pvec=[0.001,0.002,0.003,0.004,0.005,0.006,0.007,0.008,0.009,0.01] nsample=100 make benchmark-ldpc


nvec=[24,54,96,150] pvec=0.01:0.01:0.1 nsample=10000 make generate-error-samples
pvec=[0.01,0.02,0.03,0.04,0.05,0.06,0.07,0.08,0.09,0.1] nsample=10000 make benchmark-ldpc-benchcode


pvec=[0.01,0.02] nsample=10000 make benchmark-ldpc-benchcode

nvec=[600] pvec=[0.001,0.002,0.005,0.008,0.01,0.015,0.02] nsample=100000 make generate-error-samples

pvec=[0.001,0.002,0.005,0.008,0.01,0.015,0.02] nsample=100000 make benchmark-ldpc-benchcode

nvec=[600] pvec=[0.001,0.002,0.005] nsample=1000000 make generate-error-samples

pvec=[0.001,0.002,0.005] nsample=1000000 make benchmark-ldpc-benchcode


nvec=[1400] pvec=[0.001,0.002,0.005,0.008,0.01,0.015,0.02] nsample=100000 make generate-error-samples

nvec=[1400] pvec=[0.001,0.002] nsample=1000000 make generate-error-samples

pvec=[0.001,0.002] nsample=1000000 make benchmark-ldpc-benchcode


julia -p4

13 changes: 13 additions & 0 deletions benchmark/run.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
using DecoderBenchmarks
using TensorQEC
using Gurobi
using DelimitedFiles

d = 10
c = TensorQEC.FileCode(joinpath(@__DIR__,"../data","codes","bbx^-1y_$(d).txt"), "bbx^-1y_$(d)")
row_transformation = TensorQEC.Mod2.(readdlm(joinpath(@__DIR__,"../data","codes","bbx^-1y_$(d)_row_transformation.txt"), Bool))
column_transformation = TensorQEC.Mod2.(readdlm(joinpath(@__DIR__,"../data","codes","bbx^-1y_$(d)_column_transformation.txt"), Bool))

# run_benchmark(c,[0.0001,0.0002,0.0005,0.001,0.002,0.005,0.008,0.01,0.015,0.02], 1000, 1000, TToricDecoder(row_transformation,column_transformation,4,3,d^2), joinpath(@__DIR__,"../data","result","TensorQEC");log_file="log.txt")

run_benchmark_time(c,[0.0001,0.0002,0.0005,0.001,0.002,0.005,0.008,0.01,0.015,0.02], 1000, TToricDecoder(row_transformation,column_transformation,4,3,d^2), joinpath(@__DIR__,"../data","result","TensorQEC"))
43 changes: 40 additions & 3 deletions ldpc/benchmark.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,41 @@
from functions import *
import os

for d in range(3,12,2):
run_benchmark(f"./data/surface_code/surface_code_{d}.json",[0.01,0.02,0.03,0.04,0.05,0.06,0.07,0.08,0.09,0.1,0.11,0.12,0.13,0.14,0.15,0.16,0.17,0.18,0.19,0.2],10000,f"./data/surface_code/result/ldpc/","./data/depolarizing/")
from functions import run_benchmark


def run_selected_files(code_files, pvec, max_sim, max_error, workers, log_file):
result_dir = os.path.join(os.path.dirname(__file__), "..", "data", "result", "ldpc")
for file_path in code_files:
output_name = os.path.splitext(os.path.basename(file_path))[0]
run_benchmark(
file_path,
pvec,
max_sim,
max_error,
result_dir,
output_name,
workers=workers,
log_file=log_file,
)


def main():
project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
code_files = [
os.path.join(project_root, "data", "codes", "bbx^-1y_10.json"),
]
pvec = [0.0001,0.0002,0.0005,0.001,0.002,0.005,0.008,0.01,0.015,0.02]
max_sim = 1000000
max_error = 500
workers = 6
log_file = os.path.join(project_root, "log.txt")

missing = [path for path in code_files if not os.path.isfile(path)]
if missing:
raise FileNotFoundError(f"Missing code files: {missing}")

run_selected_files(code_files, pvec, max_sim, max_error, workers, log_file)


if __name__ == "__main__":
main()
44 changes: 44 additions & 0 deletions ldpc/benchmark_example.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
import os

import numpy as np
from functions import run_benchmark


def run_selected_files(code_files, pvec, max_sim, max_error, workers, log_file):
result_dir = os.path.join(os.path.dirname(__file__), "..", "data", "result", "ldpc")
for file_path in code_files:
output_name = os.path.splitext(os.path.basename(file_path))[0]
run_benchmark(
file_path,
pvec,
max_sim,
max_error,
result_dir,
output_name,
workers=workers,
log_file=log_file,
)


def main():
project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
code_files = [
os.path.join(project_root, "data", "codes", "SurfaceCode(3, 3).json"),
os.path.join(project_root, "data", "codes", "SurfaceCode(5, 5).json"),
os.path.join(project_root, "data", "codes", "SurfaceCode(7, 7).json"),
]
pvec = np.arange(0.01, 0.21, 0.01).tolist()
max_sim = 100
max_error = 100
workers = 6
log_file = os.path.join(project_root, "log.txt")

missing = [path for path in code_files if not os.path.isfile(path)]
if missing:
raise FileNotFoundError(f"Missing code files: {missing}")

run_selected_files(code_files, pvec, max_sim, max_error, workers, log_file)


if __name__ == "__main__":
main()
37 changes: 37 additions & 0 deletions ldpc/benchmark_time.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
import os

from functions import run_benchmark_time


def run_selected_files(code_files, pvec, max_sim, init_num):
result_dir = os.path.join(os.path.dirname(__file__), "..", "data", "result", "ldpc")
for file_path in code_files:
output_name = os.path.splitext(os.path.basename(file_path))[0]
run_benchmark_time(
file_path,
pvec,
max_sim,
result_dir,
output_name,
init_num=init_num,
)


def main():
project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
code_files = [
os.path.join(project_root, "data", "codes", "bbx^-1y_10.json"),
]
pvec = [0.0001, 0.0002, 0.0005, 0.001, 0.002, 0.005, 0.008, 0.01, 0.015, 0.02]
max_sim = 10000
init_num = 100

missing = [path for path in code_files if not os.path.isfile(path)]
if missing:
raise FileNotFoundError(f"Missing code files: {missing}")

run_selected_files(code_files, pvec, max_sim, init_num)


if __name__ == "__main__":
main()
Loading
Loading